In VISUALIZATION VIBES project Study 2, participants completed an attitude elicitation survey, asking questions about their attitudes toward (5) stimulus images (data visualizations). Each participant was randomly assigned to one of 6 stimulus blocks, each containing 1 image from each of (4) pseudo-categories (ranging from most abstract to most figural). Each participant started by responding to questions for a single ‘common’ stimulus (which is thus super-powered, as it was seen by all participants). Two participant recruitment pools were used: Prolific, with a smaller set of participants recruited from Tumblr (to replicate and compare survey results to Study 1 interviews with participants sourced from Tumblr).

This notebook contains code to replicate quantitative analysis of data from Study 2 reported in the CHI submission. Note that due to limited space, we were unable to report results for all stimulus blocks, and all possible analyses. A separate set of R notebooks are included in the supplementary materials that document analysis of the other blocks not reported here.

This notebook includes analysis and exploration of the data set at the stimulus category level

1 SETUP

We start by importing data files previously wrangled in 0_VIBES_S2_wrangling.Rmd.

1.1 Import Data

############## IMPORT REFERENCE FILES
## Reference tables produced upstream by 0_VIBES_S2_wrangling.Rmd:
## stimulus metadata, survey metadata, and the left/right anchor labels
## for the semantic differential (SD) scales (raw and absolute-value forms).
ref_stimuli <- readRDS("data/input/REFERENCE/ref_stimuli.rds")
ref_surveys <- readRDS("data/input/REFERENCE/ref_surveys.rds")
ref_labels <- readRDS("data/input/REFERENCE/ref_labels.rds")
ref_labels_abs <- readRDS("data/input/REFERENCE/ref_labels_abs.rds")

############## SETUP Graph Labels
## Vectors of question/stimulus identifiers reused throughout the notebook
## when ordering factors and indexing label tables.
ref_stim_id <- levels(ref_stimuli$ID)
ref_cat_questions <- c("MAKER_ID","MAKER_AGE","MAKER_GENDER")
ref_free_response <- c("MAKER_DETAIL", "MAKER_EXPLAIN", "TOOL_DETAIL", "CHART_EXPLAIN")
ref_conf_questions <- c("MAKER_CONF", "AGE_CONF", "GENDER_CONF", "TOOL_CONF")
## SD question names are the row names of the label tables, so the same
## string can index both the data columns and the label rows.
ref_sd_questions <- rownames(ref_labels)
ref_sd_questions_abs <- rownames(ref_labels_abs)


# ref_blocks <- c("block1", "block2", "block3", "block4", "block5", "block6")
ref_blocks <- c(1,2,3,4,5,6)
############## IMPORT DATA FILES
## Participant-level and question-level data frames, wrangled upstream.
## Three parallel versions of the SD responses exist: raw (0-100),
## z-scored (per variable), and absolute-value (folded at the midpoint).
# df_data <- readRDS("data/output/df_data.rds") #1 row per participant — WIDE
df_participants <- readRDS("data/output/df_participants.rds") #1 row per participant — demographic
df_questions <- readRDS("data/output/df_questions.rds") #1 row per question — LONG
df_sd_questions_wide <- readRDS("data/output/df_sd_questions_wide.rds") # only sd questions WIDE


df_tools <- readRDS("data/output/df_tools.rds") #multiselect format for tools Question
df_actions <- readRDS("data/output/df_actions.rds") # multiselect format for action Question
# # df_graphs_full <- readRDS("data/output/df_graphs_full.rds") #includes free response data

df_graphs <- readRDS("data/output/df_graphs.rds") #only categorical and numeric questions
df_sd_questions_long <- readRDS("data/output/df_sd_questions_long.rds") # only sd questions LONG

### DATA FILES WITH (VARIABLE-WISE) Z-SCORED SEMANTIC DIFFERENTIAL QS 
df_graphs_z <- readRDS("data/output/df_graphs_z.rds") #only categorical and numeric questions
df_sd_questions_long_z <- readRDS("data/output/df_sd_questions_long_z.rds") # only sd questions LONG


### DATA FILES WITH ABSOLUTE VALUE SEMANTIC DIFFERENTIAL QS 
df_graphs_abs <- readRDS("data/output/df_graphs_abs.rds") #only categorical and numeric questions
df_sd_questions_long_abs <- readRDS("data/output/df_sd_questions_long_abs.rds") # only sd questions LONG

1.2 Set up Graphing

############## SETUP Colour Palettes
#https://www.r-bloggers.com/2022/06/custom-colour-palettes-for-ggplot2/

## Named list of custom colour palettes used across the notebook's figures.
## Each entry is a character vector of hex codes (or R colour names),
## ordered light-to-dark where the palette is a gradient.
## (Style fix: use `<-` for assignment, per tidyverse style.)
my_colors <- list(
  politics = c("#184aff","#5238bf", "#4f4a52" ,"#84649c", "#ff0000"),
  blackred = c("black","red"),
  greys = c("#707070","#999999","#C2C2C2"),
  greens = c("#ADC69D","#81A06D","#567E39","#2D5D16","#193E0A"),
  smallgreens = c("#ADC69D","#567E39","#193E0A"),
  olives = c("#CDCEA1","#B8B979","#A0A054","#78783F","#50502A","#35351C"),
  lightblues = c("#96C5D2","#61A2B2","#3C8093","#2C6378","#1F4A64"),
  darkblues = c("#7AAFE1","#3787D2","#2A73B7","#225E96","#1A4974","#133453"),
  reds = c("#D9B8BD","#CE98A2","#B17380","#954E5F","#78263E","#62151F"),
  traffic = c("#CE98A2","#81A06D","yellow"),
  questions = c("#B17380","#3787D2", "#567E39", "#EE897F"),
  tools= c("#D55662","#EE897F","#F5D0AD","#A0B79B","#499678","#2D363A"),
  encounter = c("#729B7D","#8E8E8E"),
  actions = c("#2A363B","#039876ff","#99b898ff","#fdcea8ff","#ff837bff","#e84a60ff"),
  platforms = c("#5D93EA","#FF70CD", "#3BD3F5", "#8B69B5","black"),
  amy_gradient =  c("#ac57aa", "#9e5fa4", "#90689f", "#827099", "#747894", "#66818e", "#578988", "#499183", "#3b997d", "#2da278", "#1faa72"),
  my_favourite_colours = c("#702963", "#637029",    "#296370")

)

## function for using palettes
## Look up a named palette from `all_palettes` and return its first `n`
## colours (type = "discrete") or an n-colour interpolated ramp
## (type = "continuous"). `direction = "-1"` reverses the result.
##
## Fixes vs. the original:
##  * `direction` is now validated with match.arg(), so calls that omit it
##    no longer error inside switch() (switch requires a length-1 EXPR).
##  * direction "-1" now reverses the generated output with rev(out);
##    previously it returned `palette[n:1]`, which for type = "continuous"
##    discarded the interpolated ramp entirely.
my_palettes <- function(name, n, all_palettes = my_colors, type = c("discrete","continuous"), direction = c("1","-1")) {
  palette <- all_palettes[[name]]
  if (is.null(palette)) {
    stop("Unknown palette name: ", name, call. = FALSE)
  }
  if (missing(n)) {
    n <- length(palette)
  }
  type <- match.arg(type)
  direction <- match.arg(direction)
  out <- switch(type,
                continuous = grDevices::colorRampPalette(palette)(n),
                discrete = palette[1:n]
  )
  if (direction == "-1") {
    out <- rev(out)
  }
  structure(out, name = name, class = "palette")
}
############## RETURNS SD STACKED AND COLORED BY X
## LOOP STYLE
## Horizontal boxplot + jitter for one semantic-differential question.
## `x`, `y`, `color` are column names given as strings; `left`/`right`
## are the SD anchor labels drawn on opposite sides via secondary axis.
multi_sd <- function (data, left, right, x, y, color) {

  p <- ggplot(data, aes(y = .data[[x]], x = .data[[y]], color = .data[[color]]))
  p <- p + geom_boxplot(width = 0.5)
  p <- p + geom_jitter(width = 0.1, alpha = 0.5)
  # response scale is 0-100; pad by 1 so jittered points are not clipped
  p <- p + scale_y_continuous(limits = c(-1, 101))
  p <- p + labs(x = "", y = "")
  p <- p + coord_flip()
  p <- p + guides(
    y = guide_axis_manual(labels = left),
    y.sec = guide_axis_manual(labels = right)
  )
  p + theme_minimal()
}


############## RETURNS SINGLE SD 
## LOOP STYLE
## Single unfaceted SD boxplot + jitter. Unlike multi_sd(), `x` is an
## unquoted column (tidy-eval via {{ }}), not a string column name.
single_sd <- function (data, left, right, x) {

  sd_layers <- list(
    geom_boxplot(width = 0.5),
    geom_jitter(width = 0.1, alpha = 0.5),
    # response scale is 0-100; pad by 1 so jittered points are not clipped
    scale_y_continuous(limits = c(-1, 101)),
    labs(x = "", y = ""),
    coord_flip(),
    # SD anchor text on both sides of the flipped axis
    guides(
      y = guide_axis_manual(labels = left),
      y.sec = guide_axis_manual(labels = right)
    ),
    theme_minimal()
  )

  ggplot(data, aes(y = {{x}}, x = "")) + sd_layers
}


# ######## RETURNS SINGLE SD
# ##  APPLY STYLE
## Boxplot/jitter panel for one SD question.
##   column   string name of the SD column to plot
##   type     "S" or "Q" — which labelling convention to use
##   mean     if TRUE, overlay blue mean point + rounded mean label
##   facet    if TRUE, facet rows (and colour jitter) by `facet_by`
##   facet_by string name of the faceting column
##   boxplot  if TRUE, draw the boxplot layer
##   labels   data.frame keyed by question name with `left`/`right` anchors
plot_sd <- function (data, column, type, mean, facet, facet_by, boxplot, labels) {

  ggplot(data, aes(y = .data[[column]], x="")) +
    {if(boxplot) geom_boxplot(width = 0.5) } +
    geom_jitter(width = 0.1, alpha=0.2, {if(facet) aes(color=.data[[facet_by]])}) +
    {if(mean)
      stat_summary(fun="mean", geom="point", shape=20, size=5, color="blue", fill="blue")
      } +
    {if(mean)
      ## blue numeric label drawn at the group mean
      stat_summary(fun="mean", geom="text", colour="blue",  fontface = "bold",
                 vjust=-1.25, hjust = 0.50, aes( label=round(..y.., digits=0)))
      } +

    {if(facet) facet_grid(.data[[facet_by]] ~ .)} +
    # scale_y_continuous(limits=c(-1,101)) +
    labs(x="", y="") +
    coord_flip()  +
    {if(type == "S")
      guides(
        y = guide_axis_manual(labels = labels[column,"left"]),
        y.sec = guide_axis_manual(labels = labels[column,"right"])
      )} +
    ## BUG FIX: the "Q" branch previously indexed `labels[q, ...]`, where
    ## `q` is undefined inside this function (it only worked if a global
    ## `q` happened to exist in the calling environment). Index by
    ## `column` as in the "S" branch.
    {if(type == "Q")
      guides(
        y = guide_axis_manual(labels = labels[column,"left"]),
        y.sec = guide_axis_manual(labels = labels[column,"right"])
      )} +
  theme_minimal()  +
     labs (
       caption = column
     ) + easy_remove_legend()
}

2 STIMULUS-CATEGORY

For the purpose of optimizing aesthetic diversity of stimuli seen by each participant, we organized the stimuli into 4 approximate ‘categories’ of abstraction, where A = the most abstract, and D the most figural. Each participant first saw the common stimulus (B0-0) followed by one stimulus from each category (order randomized) in a block structure.

2.1 SAMPLE

2.1.1 Sample Demographics

df <- df_participants

## FOR DESCRIPTIVES PARAGRAPH
## Gender proportions and n per recruitment pool; these globals are
## referenced by the inline text below and removed afterwards with rm().
# #PROLIFIC
df.p <- df %>% filter(Distribution == "PROLIFIC")
desc.gender.p <- table(df.p$D_gender) %>% prop.table()
names(desc.gender.p) <- levels(df.p$D_gender)
p_participants <- nrow(df.p)

# #TUMBLR
df.t <- df %>% filter(Distribution == "TUMBLR")
desc.gender.t <- table(df.t$D_gender) %>% prop.table()
names(desc.gender.t) <- levels(df.t$D_gender)
t_participants <- nrow(df.t)

For study 2, a total of 318 participants were recruited from US-located English speaking users of TUMBLR (n = 78) and PROLIFIC (n = 240).

240 individuals from PROLIFIC participated in Study 2, ( 54% Female, 42% Male, 3% Non-binary, 1% Other).

78 individuals from Tumblr participated in Study 2, ( 36% Female, 5% Male, 40% Non-binary, 19% Other). Note that a higher proportion of participants recruited from TUMBLR report identities other than cis-gender Female and cis-gender Male.

2.1.2 Study Response Time

df <- df_participants

## for descriptives paragraph
## psych::describe() gives min/max/mean/sd of completion time (minutes),
## reported separately per recruitment pool in the text below.
p.desc.duration <- psych::describe(df %>% filter(Distribution=="PROLIFIC") %>% pull(duration.min))
t.desc.duration <- psych::describe(df %>% filter(Distribution=="TUMBLR") %>% pull(duration.min))

PROLIFIC SAMPLE (n = 240 ) participant response times ranged from 13.97 to 216.18 minutes, with a mean response time of 42.49 minutes, SD = 21.15.

TUMBLR SAMPLE (n = 78 ) participant response times ranged from 10.88 to 227.57 minutes, with a mean response time of 51.93 minutes, SD = 35.47.

## tidy up the descriptives globals created above
rm(df, df.p, df.t, p.desc.duration, t.desc.duration, desc.gender.p, desc.gender.t, p_participants, t_participants)
#full data except for common stimulus B0-0
## df_cat is the working data set for all category-level analyses below.
## STIMULUS_CATEGORY is reversed so categories plot A (top) to D (bottom)
## after coord_flip; STUDY is a constant used as a dummy grouping axis.
df_cat <- df_graphs %>% 
  filter(STIMULUS != "B0-0") %>% 
  mutate(
    STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
    STUDY = "" #dummy variable for univariate visualizations
  )
# %>%
#   mutate(MAKER_ID = fct_rev(MAKER_ID))

2.2 CONFIDENCE

When asking participants to identify the type, age and gender of the maker of a visualization, we also asked participants to indicate their confidence in these choices.

Across all participants and all stimuli, are these (categorical) questions answered with the same degree of confidence?

Here we examine both the central tendency (mean) and shape of the distribution for each confidence variable.

## Reshape the four confidence columns to long form (one row per
## participant x stimulus x question) and precompute the per
## question-by-category mean `m` so plots can print it as a label.
## NOTE: the result is left grouped by (QUESTION, STIMULUS_CATEGORY);
## downstream ggplot code is unaffected by the grouping attribute.
df <- df_cat %>% select(PID, Distribution, STIMULUS_CATEGORY, STIMULUS,MAKER_CONF, AGE_CONF, GENDER_CONF, TOOL_CONF) %>% 
  pivot_longer(
    cols = c(MAKER_CONF, AGE_CONF, GENDER_CONF, TOOL_CONF),
    names_to = "QUESTION",
    values_to = "CONFIDENCE"
  ) %>% 
  mutate(
    ## fix facet order to match the survey's question order
    QUESTION = factor(QUESTION, levels=c("MAKER_CONF","AGE_CONF","GENDER_CONF","TOOL_CONF"  ) )
  ) %>% 
  group_by(QUESTION, STIMULUS_CATEGORY) %>% 
  mutate(
    m=round(mean(CONFIDENCE),0) #calc mean for showing in plots 
  )


## B
## CONFIDENCE ACROSS QUESTIONS (all stimuli, all Pps)
## BOXPLOT W/ JITTER
## Boxplot + jitter of confidence per stimulus category, faceted by
## question, with the precomputed group mean `m` printed in blue.
B <-
  df %>% 
  ggplot(aes(x=STIMULUS_CATEGORY, y= CONFIDENCE, fill = STIMULUS_CATEGORY)) + 
  geom_jitter(aes(color = STIMULUS_CATEGORY), alpha = 0.25, position=position_dodge2(width = 0.25)) + 
  geom_boxplot(width = 0.5) + 
  facet_wrap(~QUESTION)+
  ## MEAN
    stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size=3,
                 vjust=+0.5, hjust = -1.5, aes( label=round(m, digits=0)))+
    stat_summary(fun=mean, geom="point", size=2, color="blue", fill="blue") +
  theme_minimal() + easy_remove_legend() +
  ## BUG FIX: a `+` was missing before labs(), so the title/caption were
  ## never attached to the plot — the labs object was evaluated and
  ## printed on its own (visible as the "$title"/"$caption" output below).
  labs(title = "Confidence by Question and Stimulus Category", caption = "(mean in blue)")
## $title
## [1] "Confidence by Question and Stimulus Category"
## 
## $caption
## [1] "(mean in blue)"
## 
## attr(,"class")
## [1] "labels"
## R
## CONFIDENCE ACROSS QUESTIONS (all stimuli, all Pps)
## RIDGEPLOT W/ INTERVAL MEAN
## Density ridges of confidence per stimulus category, faceted by
## question, with mean_qi point-intervals (ggdist) below each ridge and
## the precomputed group mean `m` printed in blue.
R <-
  df %>% 
  ggplot(aes(x=CONFIDENCE, y=STIMULUS_CATEGORY, fill=STIMULUS_CATEGORY)) + 
    geom_density_ridges(scale = 0.65, alpha = 0.75, quantile_lines = TRUE) +
    scale_x_continuous(limits = c(0,100))+
    # scale_fill_manual(values = my_palettes(name="questions", direction = "-1"), name = "",  guide = guide_legend(reverse = TRUE)) +   
    stat_pointinterval(side = "bottom", scale = 0.7, slab_linewidth = NA, point_interval = "mean_qi") +
    facet_wrap(~QUESTION)+
  ## MEAN
    stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size=3,
                vjust=+2.5, hjust = 0.50, aes( label=round(m, digits=0)))+
    stat_summary(fun=mean, geom="point", size=2, color="blue", fill="blue") +
  theme_minimal() + 
  labs(title = "Confidence by Question and Stimulus Category", y = "QUESTION", caption =" (mean in blue)") + 
  easy_remove_legend()

## Render the two confidence figures built above.
B

R
## Picking joint bandwidth of 6.35
## Picking joint bandwidth of 5.91
## Picking joint bandwidth of 7.16
## Picking joint bandwidth of 6.14

2.3 MAKER ID

Participants were asked:

Who do you think is most likely responsible for having this image created?
options: (select one). The response is stored as MAKER_ID

  • business or corporation

  • journalist or news outlet

  • educational or academic institution

  • government or political organization

  • other organization

  • an individual

Participants were also asked: Please rate your confidence in this choice. The response is stored as MAKER_CONF .

#FILTER DATASET
df <- df_cat


## D
## MAKER IDENTIFICATION AGGREGATED (all)
## GGSTATSPLOT
##############################
#hack for consistent ordering of ggstats bar plot
## ggbarstats stacks categories bottom-up, so reverse the factor first to
## keep the legend/stack order consistent with the other figures.
dx <- df %>% mutate( MAKER_ID = fct_rev(MAKER_ID) )
S <-   ggbarstats( data = dx, x = MAKER_ID, y = STIMULUS_CATEGORY,
                   results.subtitle = FALSE,
                   legend.title = "MAKER ID") + 
    scale_fill_manual(values = my_palettes(name="reds", direction = "1")) +
    theme_minimal() +
    labs( title = "",  x = "", y="") + 
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
##############################


## H
## HALF EYE SLAB GGDIST
##############################
## Half-eye (slab + mean_qi interval) of MAKER_CONF per Maker ID value,
## faceted by stimulus category; per-group n and mean are precomputed in
## `count`/`m` so they can be printed on the plot.
H <-
  df %>% 
  group_by(MAKER_ID, STIMULUS_CATEGORY) %>% 
  mutate(count = n(), m = mean(MAKER_CONF)) %>% 
  ggplot(aes(y = MAKER_CONF, x = fct_rev(MAKER_ID), fill = fct_rev(MAKER_ID))) + 
  stat_halfeye(scale=0.55, density="bounded", point_interval = "mean_qi", normalize= "all") +
  facet_wrap(~STIMULUS_CATEGORY)+
  ## MEAN
  stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size = 2,
               vjust=2.5, hjust = .5, aes( label=round(m, digits=0)))+
  stat_summary(fun=mean, geom="point", shape=20, size=3, color="blue", fill="blue") +
  scale_fill_manual(values = my_palettes(name="reds", direction = "-1"), guide = guide_legend(reverse = TRUE)) +
  ## per-group sample size printed near the axis (y = 5 on the 0-100 scale)
  geom_text(aes(label= paste0("n=",count) ,  y = 5), color = "black",
            size = 3, nudge_x=0.35) + 
  labs(y="Maker ID Confidence", x="") + 
  theme_minimal() + 
  easy_remove_legend()+
  coord_flip() 
##############################
  

## Render both Maker ID figures with patchwork annotations.
S + plot_annotation(
  title = "Maker ID by STIMULUS CATEGORY",
  # subtitle = "the categories of MAKER ID were chosen in similar proportion, 
  # and both the mean (in blue) and shape of distribution of confidence scores is similar across values of Maker ID",
  caption = "(blue indicates mean)"
)

H + plot_annotation(
  title = "Maker ID Confidence by STIMULUS CATEGORY",
  # subtitle = "the categories of MAKER ID were chosen in similar proportion, 
  # and both the mean (in blue) and shape of distribution of confidence scores is similar across values of Maker ID",
  caption = "(blue indicates mean)"
)

2.4 MAKER AGE

Participants were asked: Take a moment to imagine the person(s) responsible for creating the image. What generation are they most likely from?
options: (select one) The response was saved as MAKER_AGE

  • boomers (60+ years old)

  • Generation X (44-59 years old)

  • Millennials (28-43 years old)

  • Generation Z (12 - 27 years old)

Participants were asked: Please rate your confidence in this choice. The response is stored as AGE_CONF .

#FILTER DATASET
df <- df_cat


## D
## MAKER IDENTIFICATION AGGREGATED (all)
## GGSTATSPLOT
##############################
#hack for consistent ordering of ggstats bar plot
## reverse the factor so ggbarstats' bottom-up stacking matches the
## ordering used elsewhere in the notebook
dx <- df %>% mutate( MAKER_AGE = fct_rev(MAKER_AGE) )
S <-   ggbarstats( data = dx, x = MAKER_AGE, y = STIMULUS_CATEGORY,
                   legend.title = "MAKER AGE",
                   results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="lightblues", direction = "1")) +
    theme_minimal() +
    labs( title = "",  x = "", y="") + 
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
##############################


  
## H
## HALF EYE SLAB GGDIST
##############################
## Half-eye of AGE_CONF per Maker AGE value, faceted by stimulus category.
## Same layout as the Maker ID half-eye above; note the mean label here
## uses the computed stat (..y..) rather than the precomputed `m` — the
## two are equivalent since both are the per-group mean.
H <- df %>% 
  group_by(MAKER_AGE, STIMULUS_CATEGORY) %>% 
  mutate(count = n(), m = mean(AGE_CONF)) %>% 
  ggplot(aes(y = AGE_CONF, x = fct_rev(MAKER_AGE), fill = fct_rev(MAKER_AGE))) + 
  stat_halfeye(scale=0.55, density="bounded", point_interval = "mean_qi", normalize= "all") +
  facet_wrap(~STIMULUS_CATEGORY)+
  ## MEAN
  stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size = 2,
               vjust=2.5, hjust = .5, aes( label=round(..y.., digits=0)))+
  stat_summary(fun=mean, geom="point", shape=20, size=3, color="blue", fill="blue") +
  scale_fill_manual(values = my_palettes(name="lightblues", direction = "-1"), guide = guide_legend(reverse = TRUE)) +
  ## per-group sample size printed near the axis (y = 5 on the 0-100 scale)
  geom_text(aes(label= paste0("n=",count) ,  y = 5), color = "black",
            size = 3, nudge_x=0.35) + 
  labs(y="Maker AGE Confidence", x="") + 
  theme_minimal() + 
  easy_remove_legend()+
  coord_flip() 
##############################


## Render both Maker AGE figures with patchwork annotations.
S  + plot_annotation(
  title = "Maker AGE by STIMULUS CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

H + plot_annotation(
  title = "Maker AGE Confidence by STIMULUS CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

2.5 MAKER GENDER

Participants were asked: Take a moment to imagine the person(s) responsible for creating the image. What gender do they most likely identify with?
options: [female / male / other ] (select one). Responses were stored as MAKER_GENDER.

Participants were asked: Please rate your confidence in this choice. The response is stored as GENDER_CONF .

#FILTER DATASET
df <- df_cat


## D
## MAKER IDENTIFICATION AGGREGATED (all)
## GGSTATSPLOT
##############################
#hack for consistent ordering of ggstats bar plot
## BUG FIX: this previously reversed MAKER_AGE (copy-paste from the AGE
## section) even though the plot below displays MAKER_GENDER, so the
## ordering hack had no effect here. Reverse MAKER_GENDER instead, as the
## MAKER_ID and MAKER_AGE sections do for their own x variable.
dx <- df %>% mutate( MAKER_GENDER = fct_rev(MAKER_GENDER) )
S <-   ggbarstats( data = dx, x = MAKER_GENDER, y = STIMULUS_CATEGORY,
                   legend.title = "MAKER GENDER", 
                   results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="smallgreens", direction = "1")) +
    theme_minimal() +
    labs( title = "",  x = "", y="") + 
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
##############################



## H
## HALF EYE SLAB GGDIST
##############################
## Half-eye of GENDER_CONF per Maker GENDER value, faceted by stimulus
## category. NOTE(review): this uses the "greens" palette while the bar
## plot above uses "smallgreens"; MAKER_GENDER has 3 levels so only the
## first 3 reversed greens are used — confirm the mismatch is intended.
H <- df %>% 
  group_by(MAKER_GENDER, STIMULUS_CATEGORY) %>% 
  mutate(count = n(), m = mean(GENDER_CONF)) %>% 
  ggplot(aes(y = GENDER_CONF, x = MAKER_GENDER, fill = MAKER_GENDER)) + 
  stat_halfeye(scale=0.55, density="bounded", point_interval = "mean_qi", normalize= "all") +
  facet_wrap(~STIMULUS_CATEGORY) + 
  ## MEAN
  stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size = 2,
               vjust=2.5, hjust = .5, aes( label=round(..y.., digits=0)))+
  stat_summary(fun=mean, geom="point", shape=20, size=3, color="blue", fill="blue") +
  scale_fill_manual(values = my_palettes(name="greens", direction = "-1"), guide = guide_legend(reverse = TRUE)) +
  ## per-group sample size printed near the axis (y = 5 on the 0-100 scale)
  geom_text(aes(label= paste0("n=",count) ,  y = 5), color = "black",
            size = 3, nudge_x=0.35) + 
  labs(y="Maker GENDER Confidence", x="") + 
  theme_minimal() + 
  easy_remove_legend()+
  coord_flip() 
##############################

  

## Render both Maker GENDER figures with patchwork annotations.
S + plot_annotation(
  title = "Maker GENDER by STIMULUS CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

H + plot_annotation(
  title = "Maker GENDER Confidence by STIMULUS_CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

2.6 TOOL ID

Participants were asked: What tools do you think were most likely used to create this image?
options: (select all that apply). The response was saved as variable TOOL_ID (multi-select)

  • basic graphic design software (e.g. Canva, or similar)

  • advanced graphic design software (e.g. Adobe Illustrator, Figma, or similar)

  • data visualization software (e.g. Tableau, PowerBI, or similar)

  • general purpose software (e.g. MS Word/Excel, Google Sheets, or similar)

  • programming language (e.g. R, python, javascript, or similar)

Participants were asked: Please rate your confidence in this choice. The response is stored as TOOL_CONF .

#FILTER DATASET
## df_tools is one row per selected tool (multi-select), so participants
## contribute multiple rows per stimulus; category order reversed and a
## dummy STUDY column added as in df_cat.
df <- df_tools %>% 
  mutate(
    STUDY = "",
    STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)
  )


## D
## MAKER IDENTIFICATION AGGREGATED (all)
## GGSTATSPLOT
##############################
#hack for consistent ordering of ggstats bar plot
## Proportional stacked bars of TOOL_ID selections per stimulus category;
## fill palette comes from paletteer rather than my_palettes here.
S <-   ggbarstats( data = df, x = TOOL_ID, y = STIMULUS_CATEGORY,
                   legend.title = "TOOL ID", results.subtitle = FALSE) + 
    scale_fill_paletteer_d("awtools::a_palette", direction = 1)+
    theme_minimal() +
    labs( title = "",  x = "", y="") + 
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
##############################



## H
## HALF EYE SLAB GGDIST
##############################
## Half-eye of TOOL_CONF per selected tool, faceted by stimulus category.
## NOTE(review): the caption says "(median in red)" but no red median
## layer is drawn here — confirm whether the caption is stale.
H <-  df %>% 
  group_by(TOOL_ID, STIMULUS_CATEGORY) %>% 
  mutate(count = n(), m = mean(TOOL_CONF)) %>% 
  ggplot(aes(y = TOOL_CONF, x = TOOL_ID, fill = TOOL_ID)) + 
  stat_halfeye(scale=0.55, density="bounded", point_interval = "mean_qi", normalize= "all") +
  facet_wrap(~STIMULUS_CATEGORY) + 
  ## MEAN
  stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size = 2,
               vjust=2.5, hjust = .5, aes( label=round(..y.., digits=0)))+
  stat_summary(fun=mean, geom="point", shape=20, size=3, color="blue", fill="blue") +
  scale_fill_manual(values = my_palettes(name="tools", direction = "1"), guide = guide_legend(reverse = TRUE)) +
  ## per-group sample size printed near the axis (y = 5 on the 0-100 scale)
  geom_text(aes(label= paste0("n=",count) ,  y = 5), color = "black",
            size = 3, nudge_x=0.35) + 
  labs(y="TOOL ID Confidence", x="", caption="(mean in blue) (median in red)") + 
  theme_minimal() + 
  easy_remove_legend()+
  coord_flip() 
##############################
  

## Render both TOOL ID figures with patchwork annotations.
S + plot_annotation(
  title = "TOOL ID by STIMULUS CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

H + plot_annotation(
  title = "TOOL ID Confidence by STIMULUS CATEGORY",
  # subtitle = "The value
  # distribution of confidence scores is similar across values of Maker AGE",
  caption = "(blue indicates mean)"
)

2.7 ENCOUNTER CHOICE

The first question each participant saw in each stimulus block was: As you’re scrolling through your feed, you see this image. What would you do?

options: keep scrolling, pause and look at the image. (select one) The response was saved as variable ENCOUNTER

## B
## ENCOUNTER  BY STIMULUS
## GGSTATSPLOT
## Proportional stacked bars of the ENCOUNTER choice (keep scrolling vs
## pause and look) per stimulus category.
df_cat %>% 
  ggbarstats(  
            x = ENCOUNTER, y = STIMULUS_CATEGORY,
            legend.title = "ENCOUNTER",
            results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="encounter", direction = "-1"))+
    theme_minimal() + 
    labs( title = "ENCOUNTER Choice by STIMULUS_CATEGORY", subtitle = "", x = "")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

2.8 ACTION CHOICE

The last question participants were asked in each stimulus block was: Imagine you encounter the following image while scrolling. Which of the following are you most likely to do?

options: (select all that apply). The response was saved as variable CHART_ACTION

  • post a comment

  • share/repost

  • share/repost WITH comment

  • look up more information about the topic or source

  • unfollow/block the source

  • NOTHING—just keep scrolling

## B
## ACTION  BY STIMULUS
## GGSTATSPLOT
## Proportional stacked bars of CHART_ACTION (multi-select; one row per
## selected action) per stimulus category; factor reversed for stacking
## order consistency.
df_actions %>% mutate(
  CHART_ACTION = fct_rev(CHART_ACTION),
  STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
  STUDY="") %>% 
  ggbarstats( x = CHART_ACTION, y = STIMULUS_CATEGORY,
            legend.title = "CHART ACTION",
            results.subtitle = FALSE) + 
    # scale_fill_paletteer_d("awtools::a_palette", direction = 1)+
    scale_fill_manual(values = my_palettes(name="actions", direction = "1"))+
    theme_minimal() + 
    labs( title = "ACTION Choice by CATEGORY ", subtitle = "", x = "")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

## B
## ACTION  BY STIMULUS
## GGSTATSPLOT
## Same plot using CHART_ACTION4, a collapsed (4-level) recoding of the
## action variable produced during wrangling.
df_actions %>% mutate(
  CHART_ACTION4 = fct_rev(CHART_ACTION4),
  STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
  STUDY="") %>% 
  ggbarstats( x = CHART_ACTION4, y = STIMULUS_CATEGORY,
            legend.title = "collapsed CHART ACTION",
            results.subtitle = FALSE) + 
    # scale_fill_paletteer_d("awtools::a_palette", direction = 1)+
    scale_fill_manual(values = my_palettes(name="actions", direction = "1"))+
    theme_minimal() + 
    labs( title = "collapsed ACTION Choice4 by CATEGORY ", subtitle = "", x = "")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

## B
## ACTION  BY STIMULUS
## GGSTATSPLOT
## Same plot using CHART_ACTION3, a collapsed (3-level) recoding of the
## action variable produced during wrangling.
df_actions %>% mutate(
  CHART_ACTION3 = fct_rev(CHART_ACTION3),
  STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
  STUDY="") %>% 
  ggbarstats( x = CHART_ACTION3, y = STIMULUS_CATEGORY,
            legend.title = "collapsed CHART ACTION",
            results.subtitle = FALSE) + 
    # scale_fill_paletteer_d("awtools::a_palette", direction = 1)+
    scale_fill_manual(values = my_palettes(name="actions", direction = "1"))+
    theme_minimal() + 
    labs( title = "collapsed ACTION Choice3 by CATEGORY ", subtitle = "", x = "")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

## B
## ACTION  BY STIMULUS
## GGSTATSPLOT
## Same plot using CHART_ACTION2, a collapsed (2-level) recoding of the
## action variable produced during wrangling.
df_actions %>% mutate(
  CHART_ACTION2 = fct_rev(CHART_ACTION2),
  STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
  STUDY="") %>% 
  ggbarstats( x = CHART_ACTION2, y = STIMULUS_CATEGORY,
            legend.title = "collapsed CHART ACTION",
            results.subtitle = FALSE) + 
    # scale_fill_paletteer_d("awtools::a_palette", direction = 1)+
    scale_fill_manual(values = my_palettes(name="actions", direction = "1"))+
    theme_minimal() + 
    labs( title = "collapsed ACTION Choice2 by CATEGORY ", subtitle = "", x = "")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

2.9 SEMANTIC DIFFERENTIALS

Participants were also asked to rate certain characteristics of the chart, or its maker, along a semantic differential scale, implemented in Qualtrics as a continuous slider ranging from 0 -> 100 with bipolar adjectives at the end of each scale. The slider defaulted to the center point (50), and the interface displayed the numeric value of the slider position as a tooltip while the element had focus. Note that on both touch and mouse devices participants could interact with the survey element as a slider (i.e. click and drag, or touch and drag) or as a visual analogue scale (i.e. click or tap on a position along the scale).

2.9.1 Full Scales

The SD scores visualized here are in the same form as the participants’ response scale (slider from 0-100).

#### GROUPED DENSITY RIDGES#############################################################################
  # setup dataframe 
  ## Long-format SD responses joined to the label table so each question
  ## carries its left/right anchors and category (COMPETENCY/MAKER/CHART).
  ## NOTE(review): unlike df_cat, this keeps all stimuli — levels include
  ## "F", presumably the common stimulus B0-0's category; confirm.
  df <- df_sd_questions_long %>% select(1:8, QUESTION, STIMULUS_CATEGORY, value)  
  d <- left_join( x = df, y = ref_labels, 
                  by = c("QUESTION" = "ref_sd_questions")) %>% 
        mutate(
          category=factor(category, levels=c("COMPETENCY","MAKER","CHART")),
          QUESTION = factor(QUESTION, levels=ref_sd_questions),
          STIMULUS_CATEGORY = factor(STIMULUS_CATEGORY, levels = c("A","B","C","D","F")))%>% 
    group_by(QUESTION) %>% 
    mutate(m=median(value)) ## calc median for printing on graph
  
  
## Ridgeline of every SD question (rows) by stimulus category (columns).
## The outer parentheses assign to `c` AND print the plot.
(  c <-ggplot(d, aes(x = value, y = fct_rev(QUESTION), fill=STIMULUS_CATEGORY))+ 
    geom_density_ridges(scale = 0.75, alpha = 0.5, panel_scaling = TRUE) +
    ## MEDIAN
    stat_summary(fun=median, geom="text", fontface = "bold", size= 2.2,
                vjust=-0.5, hjust = 0.50, aes(label=round(m, digits=0)))+
    stat_summary(fun=median, geom="point", size=1) +
    facet_grid2(.~STIMULUS_CATEGORY)+
    # geom_density_ridges(scale = 1, quantile_lines = TRUE, alpha = 0.25) 
    ## left/right SD anchor labels on opposite sides of the y axis
    guides(
      y = guide_axis_manual(labels = rev(ref_labels$left)),
      y.sec = guide_axis_manual(labels = rev(ref_labels$right))
    ) +
    labs(title = "by STIMULUS CATEGORY", y = "", caption = "(point is median)") +
    cowplot::draw_text(text = ref_sd_questions, x = 40, y= ref_sd_questions, size = 6, vjust=2) + ##raw
    # # cowplot::draw_text(text = ref_sd_questions, x = -4, y= ref_sd_questions,size = 10, vjust=-2) + ##z-score
    theme_minimal() + easy_remove_legend()
)
## Picking joint bandwidth of 5.16
## Picking joint bandwidth of 5.32
## Picking joint bandwidth of 7.21
## Picking joint bandwidth of 6.27
## Picking joint bandwidth of 6.14

## `graph_save` is a flag defined elsewhere (not visible in this notebook
## section) — presumably set in a setup chunk; TODO confirm before knitting.
if(graph_save){
    ggsave(plot = c, path="figs/level_category/distributions", filename =paste0("combined_by_category","_ridges.png"), units = c("in"), width = 10, height = 14  )
}  
## Picking joint bandwidth of 5.16
## Picking joint bandwidth of 5.32
## Picking joint bandwidth of 7.21
## Picking joint bandwidth of 6.27
## Picking joint bandwidth of 6.14
## NOTE: `c` here is the plot object assigned above (it shadows base::c
## until removed).
rm(df,d, c)

2.9.2 Absolute Values

Here the semantic differential scales have been folded at their midpoint (an absolute-value transform): a transformed score of 0 corresponds to the original midpoint of 50 (indicating uncertainty, or not strongly indicating either of the labelled traits), while a transformed score of 50 corresponds to either original extreme (0 or 100, indicating a strong signal toward one of the labelled traits).

  #### GROUPED DENSITY RIDGES#############################################################################
  # setup dataframe 
  ## Absolute-value (midpoint-folded) version of the SD ridges above.
  ## NOTE(review): QUESTION levels use ref_sd_questions (raw names) while
  ## the join and axis labels use the _abs variants — this only works if
  ## the raw and abs question names coincide; confirm, else use
  ## ref_sd_questions_abs for the levels.
  df <- df_sd_questions_long_abs %>% select(1:8, QUESTION, STIMULUS_CATEGORY, value)  
  d <- left_join( x = df, y = ref_labels_abs, 
                  by = c("QUESTION" = "ref_sd_questions_abs")) %>% 
        mutate(
          category=factor(category, levels=c("COMPETENCY","MAKER","CHART")),
          QUESTION = factor(QUESTION, levels=ref_sd_questions),
          STIMULUS_CATEGORY = factor(STIMULUS_CATEGORY, levels = c("A","B","C","D","F")))%>% 
    group_by(QUESTION) %>% 
    mutate(m=median(value)) ## calc median for printing on graph
  
  
## Outer parentheses assign to `c` AND print the plot.
(  c <-ggplot(d, aes(x = value, y = fct_rev(QUESTION), fill=STIMULUS_CATEGORY))+ 
    geom_density_ridges(scale = 0.75, alpha = 0.5, panel_scaling = TRUE) +
    facet_grid2(.~STIMULUS_CATEGORY)+
     ## MEDIAN
    stat_summary(fun=median, geom="text", fontface = "bold", size= 2.2,
                vjust=-0.5, hjust = 0.50, aes(label=round(m, digits=0)))+
    stat_summary(fun=median, geom="point", size=1) +
    # geom_density_ridges(scale = 1, quantile_lines = TRUE, alpha = 0.25) 
    ## left/right anchor labels for the folded (absolute) scales
    guides(
      y = guide_axis_manual(labels = rev(ref_labels_abs$left)),
      y.sec = guide_axis_manual(labels = rev(ref_labels_abs$right))
    ) +
    labs(title = "by STIMULUS CATEGORY (absolute value)", y = "") +
    cowplot::draw_text(text = ref_sd_questions_abs, x = 20, y= ref_sd_questions_abs, size = 6, vjust=2) + ##raw
    theme_minimal() + easy_remove_legend()
)
## Picking joint bandwidth of 3.78
## Picking joint bandwidth of 3.72
## Picking joint bandwidth of 4.28
## Picking joint bandwidth of 3.97
## Picking joint bandwidth of 3.84

## Same save guard as the raw-scale figure (there written as
## `if(graph_save)`; the two forms are equivalent for a logical flag).
if(graph_save == TRUE){
    ggplot2::ggsave(plot = c, path="figs/level_category/distributions", filename =paste0("ABS_combined_by_category","_ridges.png"), units = c("in"), width = 10, height = 14  )
}
## Picking joint bandwidth of 3.78
## Picking joint bandwidth of 3.72
## Picking joint bandwidth of 4.28
## Picking joint bandwidth of 3.97
## Picking joint bandwidth of 3.84
## `c` is the plot object assigned above (shadows base::c until removed).
rm(df, d, c)

2.10 CORRELATIONS

2.10.1 correlation matrices — semantic differential

## Select the SD variables (plus PID) for correlation analysis,
## excluding the common stimulus B0-0 as elsewhere.
df <- df_graphs %>% 
  filter(STIMULUS != "B0-0") %>% 
  select(
          MAKER_DESIGN, MAKER_DATA, 
          MAKER_POLITIC, MAKER_ARGUE,
          MAKER_SELF, MAKER_ALIGN, MAKER_TRUST, 
          CHART_TRUST, CHART_INTENT, CHART_LIKE, CHART_BEAUTY, 
          PID)

print("FULL CORRELATION NO RANDOM EFFECT")
## [1] "FULL CORRELATION NO RANDOM EFFECT"
## CALCULATE full correlations with no random effects
## (correlation() is from the easystats `correlation` package; note the
## result variable `c` shadows base::c within this section)
c <- df %>%  correlation(partial=FALSE, include_factors=FALSE)
(s <- c %>% summary(redundant = FALSE))
## # Correlation Matrix (pearson-method)
## 
## Parameter     | CHART_BEAUTY | CHART_LIKE | CHART_INTENT | CHART_TRUST | MAKER_TRUST | MAKER_ALIGN | MAKER_SELF | MAKER_ARGUE | MAKER_POLITIC | MAKER_DATA
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## MAKER_DESIGN  |     -0.41*** |   -0.33*** |        -0.03 |    -0.17*** |    -0.17*** |    -0.14*** |    0.16*** |       -0.04 |       0.13*** |    0.36***
## MAKER_DATA    |     -0.19*** |   -0.24*** |      0.33*** |    -0.39*** |    -0.39*** |    -0.23*** |    0.18*** |    -0.17*** |       0.13*** |           
## MAKER_POLITIC |     -0.21*** |   -0.28*** |      0.21*** |    -0.29*** |    -0.36*** |    -0.44*** |    0.46*** |    -0.29*** |               |           
## MAKER_ARGUE   |      0.24*** |    0.30*** |     -0.35*** |     0.44*** |     0.51*** |     0.40*** |   -0.46*** |             |               |           
## MAKER_SELF    |     -0.36*** |   -0.46*** |      0.34*** |    -0.52*** |    -0.60*** |    -0.65*** |            |             |               |           
## MAKER_ALIGN   |      0.40*** |    0.51*** |     -0.32*** |     0.57*** |     0.64*** |             |            |             |               |           
## MAKER_TRUST   |      0.36*** |    0.49*** |     -0.47*** |     0.74*** |             |             |            |             |               |           
## CHART_TRUST   |      0.46*** |    0.59*** |     -0.50*** |             |             |             |            |             |               |           
## CHART_INTENT  |     -0.12*** |   -0.21*** |              |             |             |             |            |             |               |           
## CHART_LIKE    |      0.83*** |            |              |             |             |             |            |             |               |           
## 
## p-value adjustment method: Holm (1979)
## Visualize the correlation matrix summary computed above.
plot(s, show_data="point") + labs(title = "Correlation Matrix",
               subtitle="(full correlation; pearson method; Holm p-value adjustment)") + theme_minimal()

print("PARTIAL CORRELATION WITH PID AS RANDOM EFFECT")
## [1] "PARTIAL CORRELATION WITH PID AS RANDOM EFFECT"
#CALCULATE partial correlations with PID as random effect
## (this isolates correlation pairwise factoring out other variables)
## multilevel = TRUE treats the remaining factor column (PID) as the
## grouping variable for the multilevel correlation.
c <- df %>% correlation(partial=TRUE,multilevel = TRUE)
(s <- c %>% summary(redundant = FALSE ))
## # Correlation Matrix (pearson-method)
## 
## Parameter     | CHART_BEAUTY | CHART_LIKE | CHART_INTENT | CHART_TRUST | MAKER_TRUST | MAKER_ALIGN | MAKER_SELF | MAKER_ARGUE | MAKER_POLITIC | MAKER_DATA
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## MAKER_DESIGN  |     -0.27*** |       0.01 |     -0.17*** |        0.05 |       -0.03 |        0.07 |       0.07 |        0.08 |          0.08 |    0.32***
## MAKER_DATA    |         0.09 |      -0.03 |      0.21*** |    -0.13*** |    -0.15*** |       -0.02 |   -0.13*** |    9.23e-03 |         -0.04 |           
## MAKER_POLITIC |         0.01 |      -0.02 |         0.03 |        0.04 |       -0.06 |    -0.19*** |    0.22*** |       -0.06 |               |           
## MAKER_ARGUE   |         0.06 |      -0.03 |     -0.12*** |        0.05 |     0.16*** |    9.31e-03 |   -0.16*** |             |               |           
## MAKER_SELF    |     5.85e-03 |      -0.07 |         0.06 |       -0.02 |    -0.18*** |    -0.34*** |            |             |               |           
## MAKER_ALIGN   |         0.01 |     0.11** |         0.06 |        0.08 |     0.24*** |             |            |             |               |           
## MAKER_TRUST   |        -0.06 |       0.02 |      -0.11** |     0.40*** |             |             |            |             |               |           
## CHART_TRUST   |         0.03 |    0.23*** |     -0.26*** |             |             |             |            |             |               |           
## CHART_INTENT  |         0.03 |       0.03 |              |             |             |             |            |             |               |           
## CHART_LIKE    |      0.74*** |            |              |             |             |             |            |             |               |           
## 
## p-value adjustment method: Holm (1979)
###### VIS WITH CORRELATION PACKAGE
#SEE [correlation] PLOT
g <- plot(s, show_data = "point",   show_text = "label",
     stars=TRUE, show_legend=FALSE,
     show_statistic = FALSE, show_ci = FALSE) + 
     theme_minimal()+
     labs(title = "Correlation Matrix — SD Questions", 
          subtitle="(partial correlation; pearson method; Holm p-value adjustment; participant as random effect)")
     # text = list(fontface = "italic")
g

ggsave(g, scale =1, filename = "figs/level_category/heatmaps/partial_correlation_no_b00.png", width = 14, height = 6, dpi = 320, limitsize = FALSE)

#PLOT GAUSSIAN GRAPH MODEL
# plot(c)


###### VIS WITH CORRPLOT <- -- customizable but can't save to file ARGH

## GET THE MATRIX
m <- as.matrix(c)


## JUST CIRCLES
corrplot(m, method = 'circle', type = 'lower',
         order = 'original', diag = FALSE, addCoef.col = "#7A7A7A",
         tl.col = "black")

These plots depict the PARTIAL CORRELATION pairwise between variables (partial correlation factors out the influence of the other variables), with participant ID as a random effect. The resulting values are Pearson product-moment correlation coefficients, ranging from -1 (perfect negative) to +1 (perfect positive) correlation. These correlations are calculated on the full-scale semantic differential questions (i.e. with the 0 - 100 range, where the extremes are the end points and 50 is the central point)

2.10.2 correlation matrices — semantic differential — absolute values

## Build the analysis frame from the absolute-value SD data:
## drop the common stimulus B0-0 (seen by all participants), then keep
## only the SD question columns plus PID (retained as the grouping
## variable for the multilevel correlations below).
df <- df_graphs_abs %>%
  filter(STIMULUS != "B0-0") %>%
  select(MAKER_DESIGN, MAKER_DATA, MAKER_POLITIC, MAKER_ARGUE,
         MAKER_SELF, MAKER_ALIGN, MAKER_TRUST,
         CHART_TRUST, CHART_INTENT, CHART_LIKE, CHART_BEAUTY,
         PID)

print("FULL CORRELATION NO RANDOM EFFECT")
## [1] "FULL CORRELATION NO RANDOM EFFECT"
## CALCULATE full correlations with no random effects
## (zero-order Pearson correlations; include_factors = FALSE keeps the
## factor column PID out of the correlation matrix)
## NOTE(review): `c` shadows base::c() for the remainder of this chunk.
c <- df %>%  correlation(partial=FALSE, include_factors=FALSE)
## Summarize to a triangular matrix; wrapping in () assigns AND prints.
(s <- c %>% summary(redundant = FALSE))
## # Correlation Matrix (pearson-method)
## 
## Parameter     | CHART_BEAUTY | CHART_LIKE | CHART_INTENT | CHART_TRUST | MAKER_TRUST | MAKER_ALIGN | MAKER_SELF | MAKER_ARGUE | MAKER_POLITIC | MAKER_DATA
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## MAKER_DESIGN  |      0.24*** |    0.23*** |      0.13*** |     0.20*** |     0.19*** |     0.14*** |    0.16*** |     0.18*** |       0.14*** |    0.42***
## MAKER_DATA    |      0.18*** |    0.18*** |      0.29*** |     0.25*** |     0.24*** |     0.12*** |    0.19*** |     0.21*** |         0.06* |           
## MAKER_POLITIC |      0.17*** |    0.24*** |      0.11*** |     0.31*** |     0.34*** |     0.60*** |    0.50*** |     0.47*** |               |           
## MAKER_ARGUE   |      0.17*** |    0.21*** |      0.23*** |     0.38*** |     0.46*** |     0.46*** |    0.56*** |             |               |           
## MAKER_SELF    |      0.21*** |    0.28*** |      0.22*** |     0.41*** |     0.51*** |     0.64*** |            |             |               |           
## MAKER_ALIGN   |      0.24*** |    0.32*** |      0.21*** |     0.45*** |     0.54*** |             |            |             |               |           
## MAKER_TRUST   |      0.15*** |    0.26*** |      0.30*** |     0.62*** |             |             |            |             |               |           
## CHART_TRUST   |      0.32*** |    0.44*** |      0.40*** |             |             |             |            |             |               |           
## CHART_INTENT  |      0.18*** |    0.21*** |              |             |             |             |            |             |               |           
## CHART_LIKE    |      0.69*** |            |              |             |             |             |            |             |               |           
## 
## p-value adjustment method: Holm (1979)
## Quick heatmap of the full-correlation summary.
plot(s, show_data="point") + labs(title = "Correlation Matrix",
               subtitle="(full correlation; pearson method; Holm p-value adjustment)") + theme_minimal()

print("PARTIAL CORRELATION WITH PID AS RANDOM EFFECT")
## [1] "PARTIAL CORRELATION WITH PID AS RANDOM EFFECT"
#CALCULATE partial correlations with PID as random effect
## (this isolates correlation pairwise factoring out other variables)
## NOTE(review): `c` shadows base::c() for the remainder of this chunk.
c <- df %>% correlation(partial=TRUE, multilevel = TRUE)
## Summarize to a triangular matrix; wrapping in () assigns AND prints.
(s <- c %>% summary(redundant = FALSE ))
## # Correlation Matrix (pearson-method)
## 
## Parameter     | CHART_BEAUTY | CHART_LIKE | CHART_INTENT | CHART_TRUST | MAKER_TRUST | MAKER_ALIGN | MAKER_SELF | MAKER_ARGUE | MAKER_POLITIC | MAKER_DATA
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## MAKER_DESIGN  |         0.08 |       0.05 |        -0.06 |   -2.56e-03 |        0.04 |       -0.02 |       0.01 |        0.04 |          0.05 |    0.31***
## MAKER_DATA    |         0.03 |  -9.61e-03 |      0.17*** |        0.04 |        0.07 |       -0.05 |       0.04 |        0.06 |         -0.07 |           
## MAKER_POLITIC |    -5.90e-03 |       0.04 |        -0.04 |        0.03 |       -0.05 |     0.37*** |      0.10* |     0.24*** |               |           
## MAKER_ARGUE   |         0.02 |      -0.02 |         0.04 |        0.04 |     0.14*** |    4.39e-03 |    0.26*** |             |               |           
## MAKER_SELF    |    -2.83e-04 |       0.03 |         0.02 |   -6.34e-03 |     0.12*** |     0.35*** |            |             |               |           
## MAKER_ALIGN   |         0.03 |       0.06 |     5.07e-03 |        0.05 |     0.22*** |             |            |             |               |           
## MAKER_TRUST   |       -0.10* |      -0.01 |         0.06 |     0.40*** |             |             |            |             |               |           
## CHART_TRUST   |         0.06 |    0.20*** |      0.24*** |             |             |             |            |             |               |           
## CHART_INTENT  |    -1.63e-03 |  -8.18e-03 |              |             |             |             |            |             |               |           
## CHART_LIKE    |      0.62*** |            |              |             |             |             |            |             |               |           
## 
## p-value adjustment method: Holm (1979)
###### VIS WITH CORRELATION PACKAGE
#SEE [correlation] PLOT
## Heatmap of the partial-correlation summary: coefficient labels with
## significance stars; test statistics and CIs suppressed for readability.
g <- plot(s, show_data = "point",   show_text = "label",
     stars=TRUE, show_legend=FALSE,
     show_statistic = FALSE, show_ci = FALSE) + 
     theme_minimal()+
     labs(title = "Correlation Matrix — SD Questions — absolute values", 
          subtitle="(partial correlation; pearson method; Holm p-value adjustment; participant as random effect)")
     # text = list(fontface = "italic")
g

## Write the heatmap to disk for the paper/supplement.
ggsave(g, scale =1, filename = "figs/level_category/heatmaps/partial_correlation_abs_no_b00.png", width = 14, height = 6, dpi = 320, limitsize = FALSE)

#PLOT GAUSSIAN GRAPH MODEL
# plot(c)


###### VIS WITH CORRPLOT <- -- customizable but can't save to file ARGH

## GET THE MATRIX
## Convert the correlation object to a plain matrix for corrplot().
m <- as.matrix(c)


## JUST CIRCLES
## Lower-triangle circle plot with grey coefficient labels overlaid.
corrplot(m, method = 'circle', type = 'lower',
         order = 'original', diag = FALSE, addCoef.col = "#7A7A7A",
         tl.col = "black")

3 EXPLORATORY QUESTIONS

3.0.1 ENCOUNTER ~ CATEGORY

Are more figural (e.g. figures with more embellishments) graphs more likely to be interacted with than less figural graphs? To address this question, we explore the relationship between STIMULUS_CATEGORY and ENCOUNTER (whether they would likely scroll past, or stop and look at the graph).

3.0.1.1 visualize

## Build the plotting data set:
##  - exclude the common stimulus B0-0 so design cells are balanced
##  - keep only the variables used in the plots below
##  - reverse factor orders so that category A and the SCROLL response
##    act as reference levels
## (If B0-0 were kept instead, it would be recoded into category D:
##  STIMULUS_CATEGORY = fct_recode(STIMULUS_CATEGORY, D = "F"))
df <- df_graphs %>%
  filter(STIMULUS != "B0-0") %>%
  select(STIMULUS, STIMULUS_CATEGORY, BLOCK, ENCOUNTER,
         CHART_LIKE, CHART_TRUST, PID) %>%
  mutate(STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)) %>%
  mutate(ENCOUNTER = fct_rev(ENCOUNTER)) %>%
  droplevels()

## CATEGORY
## GGSTATSPLOT
## Proportion of ENCOUNTER responses within each stimulus category.
## NOTE(review): direction = "1" is passed as a string — confirm that
## my_palettes() accepts a character direction (vs numeric 1).
## NOTE(review): the manual fill scale replaces ggbarstats' default scale,
## hence the "Scale for fill is already present" messages below (benign).
##############################
ggbarstats( data = df, x = ENCOUNTER, y = STIMULUS_CATEGORY,
            results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) +
    theme_minimal() +
    labs( title = "ENCOUNTER by CATEGORY",  x = "", y="",
          subtitle = "the more figural categories (C,D) have a higher proportion of engagement") +
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################

## BLOCK
## GGSTATSPLOT
## Proportion of ENCOUNTER responses within each stimulus block.
##############################
ggbarstats( data = df, x = ENCOUNTER, y = BLOCK,
            results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) +
    theme_minimal() +
    labs( title = "ENCOUNTER by BLOCK",  x = "", y="",
          subtitle = "very little variance in proportion across blocks (as expected)") +
    theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################



# BLOCK / CATEGORY
# GGSTATSPLOT
## One panel per category (grouping.var), bars by block; panels are
## recombined below with patchwork.
##############################
x <- grouped_ggbarstats( data = df, x = ENCOUNTER, y = BLOCK,   grouping.var=STIMULUS_CATEGORY,
                    results.subtitle = FALSE) + 
    scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) + 
    theme_minimal() +
    # labs( title = "",  x = "", y="") + 
    theme(aspect.ratio = 1) + easy_remove_legend()
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
##############################

## Recompose the four per-category panels into a 2x2 patchwork layout.
## NOTE(review): subtitles contain the typo "alot" (runtime strings — fix
## in a code pass, not here).
(x[[1]] + scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) + labs(title = "CATEGORY A", subtitle = "some variance across category") + 
x[[2]] + scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) + labs(title = "CATEGORY B", subtitle = "alot of variance across category")) / 
(x[[3]] + scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) + labs(title = "CATEGORY C", subtitle = "alot of variance across category") +
x[[4]] + scale_fill_manual(values = my_palettes(name="encounter", direction = "1")) + theme_ggstatsplot() + labs(title = "CATEGORY D", subtitle = "very little variance across category")) + plot_annotation(title = "ENCOUNTER by BLOCK and CATEGORY")
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

INTERPRETATION: Here we see that, in aggregate, the more figural categories (C, D) appear to have more engagement. However, when we visualize individual blocks (i.e. stimuli) within a particular category, we see a great deal of variance. This indicates that features of a particular stimulus may be stronger predictors of engagement than the degree of embellishment.

3.0.1.2 model fit

Is stimulus or category a better predictor of engagement? Here we fit a series of mixed effects logistic regression models, predicting ENCOUNTER (reference category = SCROLL) by STIMULUS_CATEGORY and BLOCK to determine if variance in encounter choice is best explained by the stimulus category (i.e. level of embellishment) or unique features of the stimulus (i.e. embellishment can be engaging or not engaging).

Parameter estimates: the intercept is the log odds of an ENCOUNTER (vs. SCROLL, the reference level) response in the reference category/block (exponentiate for odds). Each remaining coefficient is a log odds ratio: the change in the log odds of an ENCOUNTER response for that category or block relative to the reference (exponentiate for the odds ratio). Null hypothesis: the odds of an ENCOUNTER response do not differ from the reference. Alternative hypothesis: the odds differ.

## Rebuild the modeling data set (same construction as for the plots above):
##  - exclude the common stimulus B0-0 so design cells are balanced
##  - keep the modeling variables only
##  - reverse factor levels so category A and the SCROLL response serve as
##    reference levels in the regressions below
## (If B0-0 were retained, it would be recoded into category D via
##  fct_recode(STIMULUS_CATEGORY, D = "F").)
df <- df_graphs %>%
  filter(STIMULUS != "B0-0") %>%
  select(STIMULUS, STIMULUS_CATEGORY, BLOCK, ENCOUNTER,
         CHART_LIKE, CHART_TRUST, PID) %>%
  mutate(STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)) %>%
  mutate(ENCOUNTER = fct_rev(ENCOUNTER)) %>%
  droplevels()



################## BUILD MODELS #################
## Series of mixed-effects logistic regressions (lme4::glmer) predicting
## ENCOUNTER (reference = SCROLL) with participant (PID) as a random
## intercept. Models are compared via compare_performance() and
## likelihood-ratio tests.

# RANDOM INTERCEPT SUBJECT
## Baseline model: random intercept for participant only.
mm.rP <- glmer(ENCOUNTER ~ (1|PID), data = df,family = "binomial")
## boundary (singular) fit: see help('isSingular')
## ^ per lme4, a singular fit means a variance component is estimated at
##   (or very near) zero — here the PID intercept variance.
# SUBJECT INTERCEPT | FIXED BLOCK 
## should be non predictive
print("ENCOUNTER ~ BLOCK + (1|PID)")
## [1] "ENCOUNTER ~ BLOCK + (1|PID)"
mm.BrP <- glmer(ENCOUNTER ~ BLOCK + (1|PID), 
                data = df,family = "binomial")
## boundary (singular) fit: see help('isSingular')
# :: TEST fixed factor 
compare_performance(mm.rP, mm.BrP, rank = TRUE)
## Random effect variances not available. Returned R2 does not account for random effects.
## Random effect variances not available. Returned R2 does not account for random effects.
## Following indices with missing values are not used for ranking:
##   R2_conditional, Sigma
## # Comparison of Model Performance Indices
## 
## Name   |    Model | R2 (marg.) |  RMSE | Sigma | Log_loss | Score_log | Score_spherical | AIC weights | AICc weights | BIC weights | Performance-Score
## ------------------------------------------------------------------------------------------------------------------------------------------------------
## mm.rP  | glmerMod |      0.000 | 0.495 | 1.000 |    0.683 |  -612.246 |             Inf |       0.977 |        0.978 |       1.000 |              Inf%
## mm.BrP | glmerMod |      0.002 | 0.494 | 1.000 |    0.682 |  -613.205 |             Inf |       0.023 |        0.022 |    5.99e-08 |              Inf%
## NOTE(review): AIC(logLik(m)) is equivalent to AIC(m); the simpler form
## could be used here.
paste("AIC with fixed effect is lower than random intercept only model?", AIC(logLik(mm.rP)) > AIC(logLik(mm.BrP)) )
## [1] "AIC with fixed effect is lower than random intercept only model? FALSE"
test_lrt(mm.rP,mm.BrP) #same as anova(m0, m1, test = "Chi")
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name   |    Model | df | df_diff | Chi2 |     p
## -----------------------------------------------
## mm.rP  | glmerMod |  2 |         |      |      
## mm.BrP | glmerMod |  7 |       5 | 2.48 | 0.780
## Extract just the p-value of the LRT for reporting.
paste("Likelihood Ratio test is significant? p = ",(test_lrt(mm.rP,mm.BrP))$p[2])
## [1] "Likelihood Ratio test is significant? p =  0.779620049605256"
print("A model with BLOCK is NOT a better fit than (random effect) participant alone")
## [1] "A model with BLOCK is NOT a better fit than (random effect) participant alone"
## Type II Wald chi-square test of the BLOCK fixed effect.
car::Anova(mm.BrP, type=2)
## Analysis of Deviance Table (Type II Wald chisquare tests)
## 
## Response: ENCOUNTER
##        Chisq Df Pr(>Chisq)
## BLOCK 2.4678  5     0.7813
print("BLOCK is NOT significant predictor in the model")
## [1] "BLOCK is NOT significant predictor in the model"
print("[this is as expected. suggests that we were successful in randomizing stimuli across the blocks]")
## [1] "[this is as expected. suggests that we were successful in randomizing stimuli across the blocks]"
# SUBJECT INTERCEPT | FIXED CATEGORY 
## Model with stimulus category as the fixed effect.
print("ENCOUNTER ~ CATEGORY + (1|PID)")
## [1] "ENCOUNTER ~ CATEGORY + (1|PID)"
mm.CrP <- glmer(ENCOUNTER ~ STIMULUS_CATEGORY + (1|PID), 
                data = df,family = "binomial")
# :: TEST fixed factor 
compare_performance(mm.rP, mm.BrP, mm.CrP, rank = TRUE)
## Random effect variances not available. Returned R2 does not account for random effects.
## Random effect variances not available. Returned R2 does not account for random effects.
## Following indices with missing values are not used for ranking:
##   R2_conditional, Sigma, Score_log, Score_spherical, ICC
## # Comparison of Model Performance Indices
## 
## Name   |    Model | R2 (marg.) |  RMSE | Sigma | Log_loss | Score_log | AIC weights | AICc weights | BIC weights | Performance-Score
## ------------------------------------------------------------------------------------------------------------------------------------
## mm.CrP | glmerMod |      0.044 | 0.481 | 1.000 |    0.655 |      -Inf |       1.000 |        1.000 |       1.000 |            83.33%
## mm.BrP | glmerMod |      0.002 | 0.494 | 1.000 |    0.682 |  -613.205 |    3.66e-11 |     3.59e-11 |    2.13e-13 |            17.58%
## mm.rP  | glmerMod |      0.000 | 0.495 | 1.000 |    0.683 |  -612.246 |    1.57e-09 |     1.60e-09 |    3.55e-06 |            16.67%
##anova instead of LRT b/c models are not nested 
## (mm.BrP and mm.CrP have different fixed effects, neither is a subset of
##  the other, so the chi-square comparison is descriptive here)
anova(mm.BrP,mm.CrP) #same as anova(m0, m1, test = "Chi")
## Data: df
## Models:
## mm.CrP: ENCOUNTER ~ STIMULUS_CATEGORY + (1 | PID)
## mm.BrP: ENCOUNTER ~ BLOCK + (1 | PID)
##        npar    AIC    BIC  logLik deviance Chisq Df Pr(>Chisq)
## mm.CrP    5 1700.7 1726.4 -845.35   1690.7                    
## mm.BrP    7 1748.8 1784.8 -867.38   1734.8     0  2          1
paste("A model with CATEGORY predicting ENCOUNTER is a better fit than a model with only BLOCK, however it is not a significantly better fit, as evaluated by a CHISQR btwn the models")
## [1] "A model with CATEGORY predicting ENCOUNTER is a better fit than a model with only BLOCK, however it is not a significantly better fit, as evaluated by a CHISQR btwn the models"
## Type III Wald chi-square test of the CATEGORY fixed effect.
car::Anova(mm.CrP, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
## 
## Response: ENCOUNTER
##                     Chisq Df     Pr(>Chisq)    
## (Intercept)        1.2558  1         0.2624    
## STIMULUS_CATEGORY 44.8279  3 0.000000001007 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print("CATEGORY is a significant predictor in this model")
## [1] "CATEGORY is a significant predictor in this model"
# SUBJECT INTERCEPT | FIXED CATEGORY + BLOCK
## Additive model: both category and block as fixed effects.
print("ENCOUNTER ~ CATEGORY + BLOCK + (1|PID)")
## [1] "ENCOUNTER ~ CATEGORY + BLOCK + (1|PID)"
mm.C_BrP <- glmer(ENCOUNTER ~ STIMULUS_CATEGORY + BLOCK + (1|PID), 
                data = df,family = "binomial")
# :: TEST fixed factor 
compare_performance(mm.rP, mm.BrP, mm.CrP, mm.C_BrP, rank = TRUE)
## Random effect variances not available. Returned R2 does not account for random effects.
## Random effect variances not available. Returned R2 does not account for random effects.
## Following indices with missing values are not used for ranking:
##   R2_conditional, Sigma, Score_log, ICC, Score_spherical
## # Comparison of Model Performance Indices
## 
## Name     |    Model | R2 (marg.) |  RMSE | Sigma | Log_loss | Score_log | AIC weights | AICc weights | BIC weights | Performance-Score
## --------------------------------------------------------------------------------------------------------------------------------------
## mm.CrP   | glmerMod |      0.044 | 0.481 | 1.000 |    0.655 |      -Inf |       0.977 |        0.978 |       1.000 |            82.44%
## mm.C_BrP | glmerMod |      0.047 | 0.482 | 1.000 |    0.656 |      -Inf |       0.023 |        0.022 |    6.07e-08 |            33.97%
## mm.BrP   | glmerMod |      0.002 | 0.494 | 1.000 |    0.682 |  -613.205 |    3.58e-11 |     3.51e-11 |    2.13e-13 |            17.53%
## mm.rP    | glmerMod |      0.000 | 0.495 | 1.000 |    0.683 |  -612.246 |    1.54e-09 |     1.57e-09 |    3.55e-06 |            16.67%
##anova instead of LRT b/c models are not nested 
## NOTE(review): unlike the previous comparison, mm.CrP IS nested in
## mm.C_BrP (same fixed effects plus BLOCK), so the LRT below is valid.
anova(mm.CrP,mm.C_BrP) #same as anova(m0, m1, test = "Chi")
## Data: df
## Models:
## mm.CrP: ENCOUNTER ~ STIMULUS_CATEGORY + (1 | PID)
## mm.C_BrP: ENCOUNTER ~ STIMULUS_CATEGORY + BLOCK + (1 | PID)
##          npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## mm.CrP      5 1700.7 1726.4 -845.35   1690.7                     
## mm.C_BrP   10 1708.2 1759.7 -844.09   1688.2 2.5086  5     0.7752
test_lrt(mm.CrP, mm.C_BrP)
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name     |    Model | df | df_diff | Chi2 |     p
## -------------------------------------------------
## mm.CrP   | glmerMod |  5 |         |      |      
## mm.C_BrP | glmerMod | 10 |       5 | 2.51 | 0.775
paste("A model with a linear combination of CATEGORY and BLOCK predicting ENCOUNTER is NOT better fit than a model with only CATEGORY.")
## [1] "A model with a linear combination of CATEGORY and BLOCK predicting ENCOUNTER is NOT better fit than a model with only CATEGORY."
## Type III Wald chi-square tests of the fixed effects.
car::Anova(mm.C_BrP, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
## 
## Response: ENCOUNTER
##                     Chisq Df      Pr(>Chisq)    
## (Intercept)        0.6873  1          0.4071    
## STIMULUS_CATEGORY 44.8524  3 0.0000000009946 ***
## BLOCK              2.5005  5          0.7764    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print("CATEGORY is a significant predictor in this model, but BLOCK is not")
## [1] "CATEGORY is a significant predictor in this model, but BLOCK is not"
# SUBJECT INTERCEPT | FIXED BLOCK * CATEGORY INTERACTION 
## Interaction model: CATEGORY * BLOCK jointly identify the individual
## stimulus (each block x category cell is one stimulus).
print("ENCOUNTER ~ CATEGORY * BLOCK + (1|PID)")
## [1] "ENCOUNTER ~ CATEGORY * BLOCK + (1|PID)"
mm.CBrP <- glmer(ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1|PID), 
                data = df,family = "binomial",
               control=glmerControl(optimizer="bobyqa", # would not converge under Nelder-Mead
               optCtrl=list(maxfun=2e5)))
# :: TEST fixed factor 
compare_performance(mm.BrP, mm.CrP, mm.C_BrP, mm.CBrP, rank = TRUE)
## Random effect variances not available. Returned R2 does not account for random effects.
## Following indices with missing values are not used for ranking:
##   R2_conditional, Sigma, Score_log, ICC, Score_spherical
## # Comparison of Model Performance Indices
## 
## Name     |    Model | R2 (marg.) |  RMSE | Sigma | Log_loss | Score_log | AIC weights | AICc weights | BIC weights | Performance-Score
## --------------------------------------------------------------------------------------------------------------------------------------
## mm.CBrP  | glmerMod |      0.127 | 0.459 | 1.000 |    0.609 |      -Inf |       1.000 |        1.000 |    1.89e-14 |            66.67%
## mm.CrP   | glmerMod |      0.044 | 0.481 | 1.000 |    0.655 |      -Inf |    2.31e-09 |     3.80e-09 |       1.000 |            39.06%
## mm.C_BrP | glmerMod |      0.047 | 0.482 | 1.000 |    0.656 |      -Inf |    5.46e-11 |     8.43e-11 |    6.07e-08 |            22.68%
## mm.BrP   | glmerMod |      0.002 | 0.494 | 1.000 |    0.682 |  -613.205 |    8.46e-20 |     1.36e-19 |    2.13e-13 |            16.67%
##anova instead of LRT b/c models are not nested 
## NOTE(review): mm.C_BrP IS nested in mm.CBrP (main effects vs. main
## effects + interaction), so the LRT below is valid.
anova(mm.C_BrP, mm.CBrP)
## Data: df
## Models:
## mm.C_BrP: ENCOUNTER ~ STIMULUS_CATEGORY + BLOCK + (1 | PID)
## mm.CBrP: ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1 | PID)
##          npar    AIC    BIC  logLik deviance  Chisq Df      Pr(>Chisq)    
## mm.C_BrP   10 1708.2 1759.7 -844.09   1688.2                              
## mm.CBrP    25 1660.9 1789.6 -805.46   1610.9 77.263 15 0.0000000002203 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
test_lrt(mm.C_BrP, mm.CBrP, verbose = TRUE) #same as anova(m0, m1, test = "Chi")
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name     |    Model | df | df_diff |  Chi2 |      p
## ---------------------------------------------------
## mm.C_BrP | glmerMod | 10 |         |       |       
## mm.CBrP  | glmerMod | 25 |      15 | 77.26 | < .001
paste("A model with an interaction of BLOCK * CATEGORY is a significantly better fit than a model with main effects only. (NOTE that block*category == stimulus. Here we fit the interaction so that we can portion variance between block and category, and compare the models as they will be nested)")
## [1] "A model with an interaction of BLOCK * CATEGORY is a significantly better fit than a model with main effects only. (NOTE that block*category == stimulus. Here we fit the interaction so that we can portion variance between block and category, and compare the models as they will be nested)"
## Type III Wald chi-square tests of the fixed effects and interaction.
car::Anova(mm.CBrP, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
## 
## Response: ENCOUNTER
##                           Chisq Df     Pr(>Chisq)    
## (Intercept)              0.4543  1        0.50030    
## STIMULUS_CATEGORY        7.2414  3        0.06459 .  
## BLOCK                    8.5141  5        0.13009    
## STIMULUS_CATEGORY:BLOCK 68.4523 15 0.000000008415 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print("In this model, only the interaction is significant. Neither main effects are significant.")
## [1] "In this model, only the interaction is significant. Neither main effects are significant."
print("THIS SUGGESTS THAT ENCOUNTER IS BETTER PREDICTED BY THE UNIQUE STIMULUS THAN THE CATEGORY")
## [1] "THIS SUGGESTS THAT ENCOUNTER IS BETTER PREDICTED BY THE UNIQUE STIMULUS THAN THE CATEGORY"
## SANITY CHECK, MODEL WITH STIMULUS SHOULD MATCH VARIANCE EXPLAINED BY BLOCK*CATEGORY
## (each stimulus is one block x category cell, so the two parameterizations
##  span the same model space and must produce identical fits)
# SUBJECT INTERCEPT | FIXED STIMULUS 
print("SANITY CHECK — MODEL BY STIMULUS")
## [1] "SANITY CHECK — MODEL BY STIMULUS"
print("ENCOUNTER ~ STIMULUS + (1|PID)")
## [1] "ENCOUNTER ~ STIMULUS + (1|PID)"
mm.SrP <- glmer(ENCOUNTER ~ STIMULUS + (1|PID),
                data = df,family = "binomial",
               control=glmerControl(optimizer="bobyqa", # would not converge under Nelder-Mead
                            optCtrl=list(maxfun=2e5)))
## :: TEST fixed factor 
compare_performance(mm.CBrP, mm.SrP, rank = TRUE)
## Following indices with missing values are not used for ranking: Sigma
## # Comparison of Model Performance Indices
## 
## Name    |    Model | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma | Log_loss | Score_log | Score_spherical | AIC weights | AICc weights | BIC weights | Performance-Score
## ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## mm.CBrP | glmerMod |      0.157 |      0.127 | 0.035 | 0.459 | 1.000 |    0.609 |      -Inf |       7.862e-04 |       0.500 |        0.500 |       0.500 |             -Inf%
## mm.SrP  | glmerMod |      0.157 |      0.127 | 0.035 | 0.459 | 1.000 |    0.609 |      -Inf |       7.862e-04 |       0.500 |        0.500 |       0.500 |             -Inf%
anova(mm.SrP, mm.CBrP)
## Data: df
## Models:
## mm.SrP: ENCOUNTER ~ STIMULUS + (1 | PID)
## mm.CBrP: ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1 | PID)
##         npar    AIC    BIC  logLik deviance Chisq Df Pr(>Chisq)
## mm.SrP    25 1660.9 1789.6 -805.46   1610.9                    
## mm.CBrP   25 1660.9 1789.6 -805.46   1610.9     0  0
print ("SANITY CHECKED! STIMULUS MODEL SAME FIT AS BLOCK*CATEGORY")
## [1] "SANITY CHECKED! STIMULUS MODEL SAME FIT AS BLOCK*CATEGORY"
#### SET BEST MODEL
## Carry the interaction model forward as the best-fitting model.
m_best <- mm.CBrP

3.0.1.3 model describe

############ DESCRIBE FINAL MODEL ###########
## Full coefficient table for the selected model (m_best = mm.CBrP,
## ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1|PID)). Coefficients are on
## the log-odds scale; reference cell is CATEGORY = A, BLOCK = B1.
summary(m_best)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1 | PID)
##    Data: df
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 200000))
## 
##      AIC      BIC   logLik deviance df.resid 
##   1660.9   1789.6   -805.5   1610.9     1247 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7645 -0.9319  0.5190  0.7636  1.6302 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  PID    (Intercept) 0.1188   0.3446  
## Number of obs: 1272, groups:  PID, 318
## 
## Fixed effects:
##                            Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                -0.18791    0.27878  -0.674 0.500299    
## STIMULUS_CATEGORYB          0.92923    0.40109   2.317 0.020517 *  
## STIMULUS_CATEGORYC          0.22530    0.38797   0.581 0.561429    
## STIMULUS_CATEGORYD          0.76394    0.39585   1.930 0.053622 .  
## BLOCKB2                     0.42671    0.40053   1.065 0.286703    
## BLOCKB3                    -0.21345    0.40282  -0.530 0.596188    
## BLOCKB4                    -0.04190    0.39638  -0.106 0.915813    
## BLOCKB5                    -0.41083    0.40537  -1.013 0.310836    
## BLOCKB6                     0.58889    0.40296   1.461 0.143903    
## STIMULUS_CATEGORYB:BLOCKB2 -2.09657    0.58276  -3.598 0.000321 ***
## STIMULUS_CATEGORYC:BLOCKB2  1.61853    0.64977   2.491 0.012741 *  
## STIMULUS_CATEGORYD:BLOCKB2 -0.07427    0.57670  -0.129 0.897529    
## STIMULUS_CATEGORYB:BLOCKB3  0.04077    0.57259   0.071 0.943230    
## STIMULUS_CATEGORYC:BLOCKB3  0.33494    0.55909   0.599 0.549119    
## STIMULUS_CATEGORYD:BLOCKB3  0.29235    0.57093   0.512 0.608611    
## STIMULUS_CATEGORYB:BLOCKB4 -1.16471    0.56442  -2.064 0.039059 *  
## STIMULUS_CATEGORYC:BLOCKB4  0.89374    0.56498   1.582 0.113670    
## STIMULUS_CATEGORYD:BLOCKB4  0.01199    0.56137   0.021 0.982956    
## STIMULUS_CATEGORYB:BLOCKB5  0.18484    0.57197   0.323 0.746577    
## STIMULUS_CATEGORYC:BLOCKB5  0.41228    0.55913   0.737 0.460903    
## STIMULUS_CATEGORYD:BLOCKB5  1.09561    0.59252   1.849 0.064451 .  
## STIMULUS_CATEGORYB:BLOCKB6 -2.16414    0.58148  -3.722 0.000198 ***
## STIMULUS_CATEGORYC:BLOCKB6  0.72354    0.59186   1.222 0.221526    
## STIMULUS_CATEGORYD:BLOCKB6 -0.51054    0.57083  -0.894 0.371121    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation matrix not shown by default, as p = 24 > 12.
## Use print(x, correlation=TRUE)  or
##     vcov(x)        if you need it
## Automated narrative write-up of the fitted model (report package):
## restates coefficients, CIs, p-values, and marginal/conditional R2 in prose.
report(m_best)
## We fitted a logistic mixed model (estimated using ML and BOBYQA optimizer) to
## predict ENCOUNTER with STIMULUS_CATEGORY and BLOCK (formula: ENCOUNTER ~
## STIMULUS_CATEGORY * BLOCK). The model included PID as random effect (formula:
## ~1 | PID). The model's total explanatory power is moderate (conditional R2 =
## 0.16) and the part related to the fixed effects alone (marginal R2) is of 0.13.
## The model's intercept, corresponding to STIMULUS_CATEGORY = A and BLOCK = B1,
## is at -0.19 (95% CI [-0.73, 0.36], p = 0.500). Within this model:
## 
##   - The effect of STIMULUS CATEGORY [B] is statistically significant and positive
## (beta = 0.93, 95% CI [0.14, 1.72], p = 0.021; Std. beta = 0.93, 95% CI [0.14,
## 1.72])
##   - The effect of STIMULUS CATEGORY [C] is statistically non-significant and
## positive (beta = 0.23, 95% CI [-0.54, 0.99], p = 0.561; Std. beta = 0.23, 95%
## CI [-0.54, 0.99])
##   - The effect of STIMULUS CATEGORY [D] is statistically non-significant and
## positive (beta = 0.76, 95% CI [-0.01, 1.54], p = 0.054; Std. beta = 0.76, 95%
## CI [-0.01, 1.54])
##   - The effect of BLOCK [B2] is statistically non-significant and positive (beta
## = 0.43, 95% CI [-0.36, 1.21], p = 0.287; Std. beta = 0.43, 95% CI [-0.36,
## 1.21])
##   - The effect of BLOCK [B3] is statistically non-significant and negative (beta
## = -0.21, 95% CI [-1.00, 0.58], p = 0.596; Std. beta = -0.21, 95% CI [-1.00,
## 0.58])
##   - The effect of BLOCK [B4] is statistically non-significant and negative (beta
## = -0.04, 95% CI [-0.82, 0.73], p = 0.916; Std. beta = -0.04, 95% CI [-0.82,
## 0.73])
##   - The effect of BLOCK [B5] is statistically non-significant and negative (beta
## = -0.41, 95% CI [-1.21, 0.38], p = 0.311; Std. beta = -0.41, 95% CI [-1.21,
## 0.38])
##   - The effect of BLOCK [B6] is statistically non-significant and positive (beta
## = 0.59, 95% CI [-0.20, 1.38], p = 0.144; Std. beta = 0.59, 95% CI [-0.20,
## 1.38])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B2] is statistically significant
## and negative (beta = -2.10, 95% CI [-3.24, -0.95], p < .001; Std. beta = -2.10,
## 95% CI [-3.24, -0.95])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B2] is statistically significant
## and positive (beta = 1.62, 95% CI [0.35, 2.89], p = 0.013; Std. beta = 1.62,
## 95% CI [0.35, 2.89])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B2] is statistically
## non-significant and negative (beta = -0.07, 95% CI [-1.20, 1.06], p = 0.898;
## Std. beta = -0.07, 95% CI [-1.20, 1.06])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B3] is statistically
## non-significant and positive (beta = 0.04, 95% CI [-1.08, 1.16], p = 0.943;
## Std. beta = 0.04, 95% CI [-1.08, 1.16])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B3] is statistically
## non-significant and positive (beta = 0.33, 95% CI [-0.76, 1.43], p = 0.549;
## Std. beta = 0.33, 95% CI [-0.76, 1.43])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B3] is statistically
## non-significant and positive (beta = 0.29, 95% CI [-0.83, 1.41], p = 0.609;
## Std. beta = 0.29, 95% CI [-0.83, 1.41])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B4] is statistically significant
## and negative (beta = -1.16, 95% CI [-2.27, -0.06], p = 0.039; Std. beta =
## -1.16, 95% CI [-2.27, -0.06])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B4] is statistically
## non-significant and positive (beta = 0.89, 95% CI [-0.21, 2.00], p = 0.114;
## Std. beta = 0.89, 95% CI [-0.21, 2.00])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B4] is statistically
## non-significant and positive (beta = 0.01, 95% CI [-1.09, 1.11], p = 0.983;
## Std. beta = 0.01, 95% CI [-1.09, 1.11])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B5] is statistically
## non-significant and positive (beta = 0.18, 95% CI [-0.94, 1.31], p = 0.747;
## Std. beta = 0.18, 95% CI [-0.94, 1.31])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B5] is statistically
## non-significant and positive (beta = 0.41, 95% CI [-0.68, 1.51], p = 0.461;
## Std. beta = 0.41, 95% CI [-0.68, 1.51])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B5] is statistically
## non-significant and positive (beta = 1.10, 95% CI [-0.07, 2.26], p = 0.064;
## Std. beta = 1.10, 95% CI [-0.07, 2.26])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B6] is statistically significant
## and negative (beta = -2.16, 95% CI [-3.30, -1.02], p < .001; Std. beta = -2.16,
## 95% CI [-3.30, -1.02])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B6] is statistically
## non-significant and positive (beta = 0.72, 95% CI [-0.44, 1.88], p = 0.222;
## Std. beta = 0.72, 95% CI [-0.44, 1.88])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B6] is statistically
## non-significant and negative (beta = -0.51, 95% CI [-1.63, 0.61], p = 0.371;
## Std. beta = -0.51, 95% CI [-1.63, 0.61])
## 
## Standardized parameters were obtained by fitting the model on a standardized
## version of the dataset. 95% Confidence Intervals (CIs) and p-values were
## computed using a Wald z-distribution approximation.
######### PRINT COEFFICIENTS 
## Tidy coefficient table on the link (logit) scale.
print("COEFFICIENT ESTIMATES — LOG ODDS")
## [1] "COEFFICIENT ESTIMATES — LOG ODDS"
tidy(m_best)
## # A tibble: 25 × 7
##    effect group term                       estimate std.error statistic  p.value
##    <chr>  <chr> <chr>                         <dbl>     <dbl>     <dbl>    <dbl>
##  1 fixed  <NA>  (Intercept)                 -0.188      0.279    -0.674 0.500   
##  2 fixed  <NA>  STIMULUS_CATEGORYB           0.929      0.401     2.32  0.0205  
##  3 fixed  <NA>  STIMULUS_CATEGORYC           0.225      0.388     0.581 0.561   
##  4 fixed  <NA>  STIMULUS_CATEGORYD           0.764      0.396     1.93  0.0536  
##  5 fixed  <NA>  BLOCKB2                      0.427      0.401     1.07  0.287   
##  6 fixed  <NA>  BLOCKB3                     -0.213      0.403    -0.530 0.596   
##  7 fixed  <NA>  BLOCKB4                     -0.0419     0.396    -0.106 0.916   
##  8 fixed  <NA>  BLOCKB5                     -0.411      0.405    -1.01  0.311   
##  9 fixed  <NA>  BLOCKB6                      0.589      0.403     1.46  0.144   
## 10 fixed  <NA>  STIMULUS_CATEGORYB:BLOCKB2  -2.10       0.583    -3.60  0.000321
## # ℹ 15 more rows
## Same table with estimates exponentiated to odds ratios (easier to interpret
## for a logistic model).
print("COEFFICIENT ESTIMATES — ODDS RATIOS")
## [1] "COEFFICIENT ESTIMATES — ODDS RATIOS"
tidy(m_best, exponentiate=TRUE)
## # A tibble: 25 × 7
##    effect group term                       estimate std.error statistic  p.value
##    <chr>  <chr> <chr>                         <dbl>     <dbl>     <dbl>    <dbl>
##  1 fixed  <NA>  (Intercept)                   0.829    0.231     -0.674 0.500   
##  2 fixed  <NA>  STIMULUS_CATEGORYB            2.53     1.02       2.32  0.0205  
##  3 fixed  <NA>  STIMULUS_CATEGORYC            1.25     0.486      0.581 0.561   
##  4 fixed  <NA>  STIMULUS_CATEGORYD            2.15     0.850      1.93  0.0536  
##  5 fixed  <NA>  BLOCKB2                       1.53     0.614      1.07  0.287   
##  6 fixed  <NA>  BLOCKB3                       0.808    0.325     -0.530 0.596   
##  7 fixed  <NA>  BLOCKB4                       0.959    0.380     -0.106 0.916   
##  8 fixed  <NA>  BLOCKB5                       0.663    0.269     -1.01  0.311   
##  9 fixed  <NA>  BLOCKB6                       1.80     0.726      1.46  0.144   
## 10 fixed  <NA>  STIMULUS_CATEGORYB:BLOCKB2    0.123    0.0716    -3.60  0.000321
## # ℹ 15 more rows

3.0.1.4 model vis

############ VISUALIZE MODEL COEFFICIENTS 
#SJPLOT | MODEL | ODDS RATIO
#library(sjPlot)
## Forest plot of the fixed effects; for a logistic model sjPlot shows these
## as odds ratios (vline marks OR = 1, i.e. no effect).
plot_model(m_best, type = "est",
           vline.color = "red", 
           show.intercept = TRUE, 
           show.values = TRUE) + theme_minimal() + 
  labs(title = "Model Predicted Odds Ratio for ENCOUNTER",
       subtitle = "")

############ VISUALIZE MODEL PREDICTIONS
#SJPLOT | MODEL | PROBABILITIES
## Predicted probabilities for the STIMULUS_CATEGORY x BLOCK interaction.
plot_model(m_best, type = "int", mdrt.values = "meansd") + theme_minimal()

## Estimated marginal means for each main effect. The NOTE lines below are
## sjPlot's own warning: main-effect EMMs can mislead when the term is
## involved in an interaction, as here.
plot_model(m_best, type="emm", 
           terms = c("BLOCK"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means for BLOCK")
## NOTE: Results may be misleading due to involvement in interactions

plot_model(m_best, type="emm", 
           terms = c("STIMULUS_CATEGORY"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means for CATEGORY")
## NOTE: Results may be misleading due to involvement in interactions

plot_model(m_best, type="emm", 
           terms = c("BLOCK","STIMULUS_CATEGORY"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means for INTERACTION")

## MANUAL PREDICTION INTERACTION PLOT (sjPlot cannot facet, so estimated means
## are extracted with modelbased::estimate_means and plotted by hand)
means <- estimate_means(m_best, at=c("BLOCK","STIMULUS_CATEGORY"), transform = "response",
                        backend="emmeans")
m <- as_tibble(means)

## CUSTOM PREDICTIONS PLOT: one panel per category; point = predicted
## probability on the response scale, linerange = CI from estimate_means.
m %>% ggplot( aes(x = BLOCK, y = Probability, color=STIMULUS_CATEGORY)) +
  geom_point() +
  geom_linerange(aes(ymin = CI_low, ymax=CI_high)) + 
  scale_y_continuous(limits = c(0,1))+
  facet_wrap(~STIMULUS_CATEGORY) + 
  theme_minimal() + easy_remove_legend() + 
  labs(title = "MODEL PREDICTED Probability of ENGAGE (rather than scroll)")

3.0.2 ENCOUNTER ~ LIKE

Here we explore whether another variable CHART_LIKE is a better predictor of ENCOUNTER than STIMULUS_CATEGORY.

3.0.2.1 visualize

## Analysis frame for the ENCOUNTER ~ CHART_LIKE exploration.
## Drops the common stimulus B0-0 (seen by every participant) so that the
## category x block cells stay balanced, then sets reference levels.
df <- df_graphs %>%
  filter(STIMULUS != "B0-0") %>%  # drop the super-powered common stimulus
  select(STIMULUS, STIMULUS_CATEGORY, BLOCK, ENCOUNTER, CHART_LIKE, CHART_TRUST, PID) %>%
  # reverse factor order so category A is the reference level
  mutate(STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)) %>%
  # reverse so SCROLL is the reference level
  mutate(ENCOUNTER = fct_rev(ENCOUNTER)) %>%
  # stimulus number within block (strip the leading "B" + 2 characters)
  mutate(STIM_NUM = str_remove(STIMULUS, regex("B..", dotall = TRUE))) %>%
  ## (if B0-0 were kept instead, it could be recoded into category D,
  ##  which it best fits:)
  # mutate(STIMULUS_CATEGORY = fct_recode(STIMULUS_CATEGORY, D = "F")) %>%
  droplevels()


## ENCOUNTER BY AVG CHART LIKE
## Box/scatter comparison of CHART_LIKE ratings between the two ENCOUNTER
## choices (stats subtitle suppressed; violin layer hidden via zero width).
ggbetweenstats(data = df, x = ENCOUNTER, y=CHART_LIKE, color = ENCOUNTER,
               violin.args = list(width = 0, linewidth = 0), #REMOVE violin plot
               results.subtitle = FALSE) + 
  scale_color_manual(values = my_palettes(name="encounter", direction = "-1")) + 
  labs(title = "ENCOUNTER ~ CHART LIKE")
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.

## ENCOUNTER BY AVG CHART LIKE & CATEGORY
## Same comparison, split into one panel per STIMULUS_CATEGORY. The custom
## palette is injected both via ggplot.component (per panel) and on the
## combined plot, hence the repeated scale-replacement messages below.
grouped_ggbetweenstats(data = df, x = ENCOUNTER, y=CHART_LIKE, color = ENCOUNTER,
                       grouping.var = STIMULUS_CATEGORY,
               violin.args = list(width = 0, linewidth = 0), #REMOVE violin plot
               results.subtitle = FALSE,
               ggplot.component = scale_color_manual(values = my_palettes(name="encounter", direction = "-1"))
               ) + 
  scale_color_manual(values = my_palettes(name="encounter", direction = "-1")) +
  plot_annotation(title = "ENCOUNTER ~ CHART LIKE + CATEGORY")
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.

## ENCOUNTER BY AVG CHART LIKE & BLOCK
## As above, but panelled by stimulus BLOCK rather than category.
grouped_ggbetweenstats(data = df, x = ENCOUNTER, y=CHART_LIKE, color = ENCOUNTER,
                       grouping.var = BLOCK,
               violin.args = list(width = 0, linewidth = 0), #REMOVE violin plot
               results.subtitle = FALSE,
               ggplot.component = scale_color_manual(values = my_palettes(name="encounter", direction = "-1"))
               ) + 
  scale_color_manual(values = my_palettes(name="encounter", direction = "-1")) +
  plot_annotation(title = "ENCOUNTER ~ CHART LIKE + BLOCK")
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.
## Scale for colour is already present.
## Adding another scale for colour, which will replace the existing scale.

## ENCOUNTER BY AVG CHART LIKE & STIMULUS
## Boxplots of CHART_LIKE by ENCOUNTER, dodged within BLOCK and faceted by
## STIMULUS_CATEGORY (so each panel shows one stimulus per block).
## NOTE(review): the original piped through
##   group_by(BLOCK, STIMULUS_CATEGORY) %>% mutate(m = mean(CHART_LIKE))
## but `m` was never mapped to any aesthetic, so that dead step (which also
## left the frame grouped) is dropped here; the rendered plot is unchanged.
df %>% 
  ggplot( aes(x = BLOCK, y = CHART_LIKE, color = ENCOUNTER)) + 
  geom_boxplot(width = 0.3, fill = "white", position = position_dodge(width=1)) + 
  geom_jitter(alpha = 0.2, position = position_dodge(width=1)) +
  scale_fill_manual(values = my_palettes(name="encounter", direction = "-1")) +
  scale_color_manual(values = my_palettes(name="encounter", direction = "-1")) +
  facet_wrap(~STIMULUS_CATEGORY) + 
  theme_minimal() + easy_remove_legend() + 
  labs(title = "ENCOUNTER by CHART LIKE for BLOCK & STIMULUS")

3.0.2.2 model fit

Is CHART_LIKE a better predictor of engagement? Here we fit a series of mixed-effects logistic regression models predicting ENCOUNTER (reference category = SCROLL) from CHART_LIKE, and compare these to the best-fit model of STIMULUS_CATEGORY and BLOCK, to determine whether variance in encounter choice is better explained by the stimulus category (i.e., level of embellishment) or by whether the participant likes the chart.

## Model frame for the CHART_LIKE comparison models. The common stimulus B0-0
## is excluded so category x block cells stay balanced; reference levels are
## reversed for modelling; CHART_LIKE is z-standardised so the glmer fits
## converge.
df <- df_graphs %>%
  filter(STIMULUS != "B0-0") %>%  # drop the super-powered common stimulus
  select(STIMULUS, STIMULUS_CATEGORY, BLOCK, ENCOUNTER, CHART_LIKE, CHART_TRUST, PID) %>%
  # reverse factor order so category A is the reference level
  mutate(STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)) %>%
  # reverse so SCROLL is the reference level
  mutate(ENCOUNTER = fct_rev(ENCOUNTER)) %>%
  # standardised predictor avoids model non-convergence
  mutate(CHART_LIKE_Z = datawizard::standardise(CHART_LIKE)) %>%
  ## (if B0-0 were kept instead, it could be recoded into category D,
  ##  which it best fits:)
  # mutate(STIMULUS_CATEGORY = fct_recode(STIMULUS_CATEGORY, D = "F")) %>%
  droplevels()



################## BUILD MODELS #################

## BEST FIT MODEL OF CATEGORY * BLOCK (refit from the previous section as the
## baseline to compare the CHART_LIKE models against)
# SUBJECT INTERCEPT | FIXED BLOCK * CATEGORY INTERACTION 
print("ENCOUNTER ~ CATEGORY * BLOCK + (1|PID)")
## [1] "ENCOUNTER ~ CATEGORY * BLOCK + (1|PID)"
mm.CBrP <- glmer(ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1|PID), 
                data = df,family = "binomial",
               control=glmerControl(optimizer="bobyqa", # would not converge under the default Nelder-Mead
               optCtrl=list(maxfun=2e5)))



# SUBJECT INTERCEPT | FIXED CHART_LIKE (z-scored)
## FIX: the printed label previously read "ENCOUNTER ~ CATEGORY + (1|PID)",
## but the model actually fitted is CHART_LIKE_Z — label corrected to match.
print("ENCOUNTER ~ CHART_LIKE_Z + (1|PID)")
## [1] "ENCOUNTER ~ CHART_LIKE_Z + (1|PID)"
mm.LrP <- glmer(ENCOUNTER ~ CHART_LIKE_Z + (1|PID), 
                data = df,family = "binomial")
# :: TEST fixed factor 
## Rank the two candidate models on fit indices; higher AIC/BIC weights
## indicate stronger relative support (here mm.LrP wins decisively).
compare_performance(mm.CBrP, mm.LrP, rank = TRUE)
## Following indices with missing values are not used for ranking: Sigma
## # Comparison of Model Performance Indices
## 
## Name    |    Model | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma | Log_loss | Score_log | Score_spherical | AIC weights | AICc weights | BIC weights | Performance-Score
## ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## mm.CBrP | glmerMod |      0.157 |      0.127 | 0.035 | 0.459 | 1.000 |    0.609 |      -Inf |       7.862e-04 |    5.06e-39 |     3.03e-39 |    1.29e-63 |             -Inf%
## mm.LrP  | glmerMod |      0.289 |      0.247 | 0.056 | 0.429 | 1.000 |    0.547 |      -Inf |       7.866e-04 |        1.00 |         1.00 |        1.00 |             -Inf%
## NOTE(review): these two models are not nested, so the chi-square / p-value
## from anova() is not interpretable here — rely on the AIC/BIC comparison
## above instead. Shown for completeness only.
anova(mm.CBrP, mm.LrP) #same as anova(m0, m1, test = "Chi")
## Data: df
## Models:
## mm.LrP: ENCOUNTER ~ CHART_LIKE_Z + (1 | PID)
## mm.CBrP: ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1 | PID)
##         npar    AIC    BIC  logLik deviance Chisq Df Pr(>Chisq)
## mm.LrP     3 1484.6 1500.0 -739.28   1478.6                    
## mm.CBrP   25 1660.9 1789.6 -805.46   1610.9     0 22          1
paste("A model with CHART_LIKE predicting ENCOUNTER is a better fit than a model with CATEGORY*BLOCK, though not significantly so")
## [1] "A model with CHART_LIKE predicting ENCOUNTER is a better fit than a model with CATEGORY*BLOCK, though not significantly so"
## Type III Wald chi-square tests for the CHART_LIKE-only model.
car::Anova(mm.LrP, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
## 
## Response: ENCOUNTER
##                Chisq Df            Pr(>Chisq)    
## (Intercept)   26.194  1          0.0000003087 ***
## CHART_LIKE_Z 170.851  1 < 0.00000000000000022 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print("CHART_LIKE is a significant predictor in this model")
## [1] "CHART_LIKE is a significant predictor in this model"
# SUBJECT INTERCEPT | FIXED CHART_LIKE + STIMULUS_CATEGORY * BLOCK
## FIX: the printed label previously omitted CHART_LIKE_Z (it read
## "ENCOUNTER ~ CATEGORY * BLOCK + (1|PID)") — corrected to match the model.
print("ENCOUNTER ~ CHART_LIKE_Z + CATEGORY * BLOCK + (1|PID)")
## [1] "ENCOUNTER ~ CHART_LIKE_Z + CATEGORY * BLOCK + (1|PID)"
mm.L_CBrP <- glmer(ENCOUNTER ~ CHART_LIKE_Z + STIMULUS_CATEGORY * BLOCK + (1|PID), 
                data = df,family = "binomial",
               control=glmerControl(optimizer="bobyqa", # would not converge under the default Nelder-Mead
               optCtrl=list(maxfun=2e5)))
# :: TEST fixed factor 
## Rank all three candidate models; the combined model (mm.L_CBrP) has the
## best AIC/AICc support, while BIC penalizes its extra parameters.
compare_performance(mm.LrP, mm.CBrP, mm.L_CBrP, rank = TRUE)
## Following indices with missing values are not used for ranking: Sigma
## # Comparison of Model Performance Indices
## 
## Name      |    Model | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma | Log_loss | Score_log | Score_spherical | AIC weights | AICc weights | BIC weights | Performance-Score
## ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
## mm.LrP    | glmerMod |      0.289 |      0.247 | 0.056 | 0.429 | 1.000 |    0.547 |      -Inf |       7.866e-04 |    4.56e-09 |     7.93e-09 |        1.00 |             -Inf%
## mm.CBrP   | glmerMod |      0.157 |      0.127 | 0.035 | 0.459 | 1.000 |    0.609 |      -Inf |       7.862e-04 |    2.31e-47 |     2.41e-47 |    1.29e-63 |             -Inf%
## mm.L_CBrP | glmerMod |      0.391 |      0.332 | 0.088 | 0.406 | 1.000 |    0.500 |      -Inf |       8.097e-04 |       1.000 |        1.000 |    4.25e-18 |             -Inf%
## NOTE(review): unlike the earlier comparison, mm.LrP IS nested within
## mm.L_CBrP (drop the CATEGORY * BLOCK terms), so the likelihood-ratio
## test below is valid.

### CHECK AGAINST JUST CHART LIKE
anova(mm.L_CBrP, mm.LrP)
## Data: df
## Models:
## mm.LrP: ENCOUNTER ~ CHART_LIKE_Z + (1 | PID)
## mm.L_CBrP: ENCOUNTER ~ CHART_LIKE_Z + STIMULUS_CATEGORY * BLOCK + (1 | PID)
##           npar    AIC  BIC  logLik deviance  Chisq Df     Pr(>Chisq)    
## mm.LrP       3 1484.6 1500 -739.28   1478.6                             
## mm.L_CBrP   26 1446.2 1580 -697.08   1394.2 84.414 23 0.000000006037 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
test_lrt(mm.L_CBrP, mm.LrP, verbose = TRUE) #same as anova(m0, m1, test = "Chi")
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name      |    Model | df | df_diff |  Chi2 |      p
## ----------------------------------------------------
## mm.L_CBrP | glmerMod | 26 |         |       |       
## mm.LrP    | glmerMod |  3 |     -23 | 84.41 | < .001
paste("A model adding the interaction of BLOCK * CATEGORY to CHART_LIKE is a significantly better fit than a model with the CHART_LIKE alone")
## [1] "A model adding the interaction of BLOCK * CATEGORY to CHART_LIKE is a significantly better fit than a model with the CHART_LIKE alone"
### CHECK AGAINST IXN MODEL
## mm.CBrP is also nested within mm.L_CBrP (drop CHART_LIKE_Z), so the LRT
## applies here too; it tests whether adding CHART_LIKE_Z improves fit.
anova(mm.L_CBrP, mm.CBrP)
## Data: df
## Models:
## mm.CBrP: ENCOUNTER ~ STIMULUS_CATEGORY * BLOCK + (1 | PID)
## mm.L_CBrP: ENCOUNTER ~ CHART_LIKE_Z + STIMULUS_CATEGORY * BLOCK + (1 | PID)
##           npar    AIC    BIC  logLik deviance  Chisq Df            Pr(>Chisq)
## mm.CBrP     25 1660.9 1789.6 -805.46   1610.9                                
## mm.L_CBrP   26 1446.2 1580.0 -697.08   1394.2 216.77  1 < 0.00000000000000022
##              
## mm.CBrP      
## mm.L_CBrP ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
test_lrt(mm.L_CBrP, mm.CBrP, verbose = TRUE) #same as anova(m0, m1, test = "Chi")
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name      |    Model | df | df_diff |   Chi2 |      p
## -----------------------------------------------------
## mm.L_CBrP | glmerMod | 26 |         |        |       
## mm.CBrP   | glmerMod | 25 |      -1 | 216.77 | < .001
paste("A model adding CHART LIKE to the interaction of BLOCK * CATEGORY is a significantly better fit than a model with the interaction only.")
## [1] "A model adding CHART LIKE to the interaction of BLOCK * CATEGORY is a significantly better fit than a model with the interaction only."
### EXAMINE THIS MODEL
## Type III Wald chi-square tests for the combined model.
car::Anova(mm.L_CBrP, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
## 
## Response: ENCOUNTER
##                            Chisq Df            Pr(>Chisq)    
## (Intercept)               1.4215  1             0.2331541    
## CHART_LIKE_Z            145.9065  1 < 0.00000000000000022 ***
## STIMULUS_CATEGORY        11.2656  3             0.0103729 *  
## BLOCK                     5.2701  5             0.3838173    
## STIMULUS_CATEGORY:BLOCK  39.6128 15             0.0005187 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print("In this model, the CHART_LIKE variable is significant, along with the STIMULUS_CATEGORY and interaction of CATEGORY & BLOCK")
## [1] "In this model, the CHART_LIKE variable is significant, along with the STIMULUS_CATEGORY and interaction of CATEGORY & BLOCK"
print("THIS SUGGESTS THAT ENCOUNTER IS BETTER PREDICTED BY THE UNIQUE STIMULUS THAN THE CATEGORY")
## [1] "THIS SUGGESTS THAT ENCOUNTER IS BETTER PREDICTED BY THE UNIQUE STIMULUS THAN THE CATEGORY"
#### SET BEST MODEL
## Carry the combined model forward as m_best for the summaries/plots below.
m_best <- mm.L_CBrP

3.0.2.3 model describe

############ DESCRIBE FINAL MODEL ###########
## Full lme4 summary: fit statistics, random-effect variance, fixed-effect
## coefficient table on the logit scale.
summary(m_best)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: ENCOUNTER ~ CHART_LIKE_Z + STIMULUS_CATEGORY * BLOCK + (1 | PID)
##    Data: df
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 200000))
## 
##      AIC      BIC   logLik deviance df.resid 
##   1446.2   1580.0   -697.1   1394.2     1246 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.3465 -0.6858  0.3146  0.6361  3.5050 
## 
## Random effects:
##  Groups Name        Variance Std.Dev.
##  PID    (Intercept) 0.3155   0.5617  
## Number of obs: 1272, groups:  PID, 318
## 
## Fixed effects:
##                            Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)                -0.38546    0.32330  -1.192             0.233154    
## CHART_LIKE_Z                1.11596    0.09239  12.079 < 0.0000000000000002 ***
## STIMULUS_CATEGORYB          1.10182    0.44870   2.456             0.014065 *  
## STIMULUS_CATEGORYC          1.25058    0.45381   2.756             0.005856 ** 
## STIMULUS_CATEGORYD          1.32554    0.44888   2.953             0.003147 ** 
## BLOCKB2                     0.41195    0.45923   0.897             0.369692    
## BLOCKB3                     0.30971    0.45896   0.675             0.499805    
## BLOCKB4                    -0.10777    0.45792  -0.235             0.813931    
## BLOCKB5                     0.20398    0.46337   0.440             0.659787    
## BLOCKB6                     0.84239    0.46207   1.823             0.068290 .  
## STIMULUS_CATEGORYB:BLOCKB2 -1.71647    0.64920  -2.644             0.008194 ** 
## STIMULUS_CATEGORYC:BLOCKB2  0.37250    0.72521   0.514             0.607499    
## STIMULUS_CATEGORYD:BLOCKB2 -0.43674    0.64963  -0.672             0.501392    
## STIMULUS_CATEGORYB:BLOCKB3 -0.51064    0.63964  -0.798             0.424685    
## STIMULUS_CATEGORYC:BLOCKB3 -0.29389    0.62892  -0.467             0.640296    
## STIMULUS_CATEGORYD:BLOCKB3 -0.94211    0.64856  -1.453             0.146331    
## STIMULUS_CATEGORYB:BLOCKB4 -0.52055    0.63337  -0.822             0.411143    
## STIMULUS_CATEGORYC:BLOCKB4 -0.39452    0.64252  -0.614             0.539203    
## STIMULUS_CATEGORYD:BLOCKB4 -0.40518    0.63479  -0.638             0.523282    
## STIMULUS_CATEGORYB:BLOCKB5 -0.41307    0.63616  -0.649             0.516125    
## STIMULUS_CATEGORYC:BLOCKB5 -1.15899    0.64592  -1.794             0.072761 .  
## STIMULUS_CATEGORYD:BLOCKB5  0.04322    0.66128   0.065             0.947886    
## STIMULUS_CATEGORYB:BLOCKB6 -2.42130    0.64518  -3.753             0.000175 ***
## STIMULUS_CATEGORYC:BLOCKB6 -0.40993    0.66794  -0.614             0.539394    
## STIMULUS_CATEGORYD:BLOCKB6 -0.65874    0.64199  -1.026             0.304846    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation matrix not shown by default, as p = 25 > 12.
## Use print(x, correlation=TRUE)  or
##     vcov(x)        if you need it
## Narrative write-up of the final model via the report package.
report(m_best)
## We fitted a logistic mixed model (estimated using ML and BOBYQA optimizer) to
## predict ENCOUNTER with CHART_LIKE_Z, STIMULUS_CATEGORY and BLOCK (formula:
## ENCOUNTER ~ CHART_LIKE_Z + STIMULUS_CATEGORY * BLOCK). The model included PID
## as random effect (formula: ~1 | PID). The model's total explanatory power is
## substantial (conditional R2 = 0.39) and the part related to the fixed effects
## alone (marginal R2) is of 0.33. The model's intercept, corresponding to
## CHART_LIKE_Z = 0, STIMULUS_CATEGORY = A and BLOCK = B1, is at -0.39 (95% CI
## [-1.02, 0.25], p = 0.233). Within this model:
## 
##   - The effect of CHART LIKE Z is statistically significant and positive (beta =
## 1.12, 95% CI [0.93, 1.30], p < .001; Std. beta = 1.12, 95% CI [0.93, 1.30])
##   - The effect of STIMULUS CATEGORY [B] is statistically significant and positive
## (beta = 1.10, 95% CI [0.22, 1.98], p = 0.014; Std. beta = 1.10, 95% CI [0.22,
## 1.98])
##   - The effect of STIMULUS CATEGORY [C] is statistically significant and positive
## (beta = 1.25, 95% CI [0.36, 2.14], p = 0.006; Std. beta = 1.25, 95% CI [0.36,
## 2.14])
##   - The effect of STIMULUS CATEGORY [D] is statistically significant and positive
## (beta = 1.33, 95% CI [0.45, 2.21], p = 0.003; Std. beta = 1.33, 95% CI [0.45,
## 2.21])
##   - The effect of BLOCK [B2] is statistically non-significant and positive (beta
## = 0.41, 95% CI [-0.49, 1.31], p = 0.370; Std. beta = 0.41, 95% CI [-0.49,
## 1.31])
##   - The effect of BLOCK [B3] is statistically non-significant and positive (beta
## = 0.31, 95% CI [-0.59, 1.21], p = 0.500; Std. beta = 0.31, 95% CI [-0.59,
## 1.21])
##   - The effect of BLOCK [B4] is statistically non-significant and negative (beta
## = -0.11, 95% CI [-1.01, 0.79], p = 0.814; Std. beta = -0.11, 95% CI [-1.01,
## 0.79])
##   - The effect of BLOCK [B5] is statistically non-significant and positive (beta
## = 0.20, 95% CI [-0.70, 1.11], p = 0.660; Std. beta = 0.20, 95% CI [-0.70,
## 1.11])
##   - The effect of BLOCK [B6] is statistically non-significant and positive (beta
## = 0.84, 95% CI [-0.06, 1.75], p = 0.068; Std. beta = 0.84, 95% CI [-0.06,
## 1.75])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B2] is statistically significant
## and negative (beta = -1.72, 95% CI [-2.99, -0.44], p = 0.008; Std. beta =
## -1.72, 95% CI [-2.99, -0.44])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B2] is statistically
## non-significant and positive (beta = 0.37, 95% CI [-1.05, 1.79], p = 0.607;
## Std. beta = 0.37, 95% CI [-1.05, 1.79])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B2] is statistically
## non-significant and negative (beta = -0.44, 95% CI [-1.71, 0.84], p = 0.501;
## Std. beta = -0.44, 95% CI [-1.71, 0.84])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B3] is statistically
## non-significant and negative (beta = -0.51, 95% CI [-1.76, 0.74], p = 0.425;
## Std. beta = -0.51, 95% CI [-1.76, 0.74])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B3] is statistically
## non-significant and negative (beta = -0.29, 95% CI [-1.53, 0.94], p = 0.640;
## Std. beta = -0.29, 95% CI [-1.53, 0.94])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B3] is statistically
## non-significant and negative (beta = -0.94, 95% CI [-2.21, 0.33], p = 0.146;
## Std. beta = -0.94, 95% CI [-2.21, 0.33])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B4] is statistically
## non-significant and negative (beta = -0.52, 95% CI [-1.76, 0.72], p = 0.411;
## Std. beta = -0.52, 95% CI [-1.76, 0.72])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B4] is statistically
## non-significant and negative (beta = -0.39, 95% CI [-1.65, 0.86], p = 0.539;
## Std. beta = -0.39, 95% CI [-1.65, 0.86])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B4] is statistically
## non-significant and negative (beta = -0.41, 95% CI [-1.65, 0.84], p = 0.523;
## Std. beta = -0.41, 95% CI [-1.65, 0.84])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B5] is statistically
## non-significant and negative (beta = -0.41, 95% CI [-1.66, 0.83], p = 0.516;
## Std. beta = -0.41, 95% CI [-1.66, 0.83])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B5] is statistically
## non-significant and negative (beta = -1.16, 95% CI [-2.42, 0.11], p = 0.073;
## Std. beta = -1.16, 95% CI [-2.42, 0.11])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B5] is statistically
## non-significant and positive (beta = 0.04, 95% CI [-1.25, 1.34], p = 0.948;
## Std. beta = 0.04, 95% CI [-1.25, 1.34])
##   - The effect of STIMULUS CATEGORY [B] × BLOCK [B6] is statistically significant
## and negative (beta = -2.42, 95% CI [-3.69, -1.16], p < .001; Std. beta = -2.42,
## 95% CI [-3.69, -1.16])
##   - The effect of STIMULUS CATEGORY [C] × BLOCK [B6] is statistically
## non-significant and negative (beta = -0.41, 95% CI [-1.72, 0.90], p = 0.539;
## Std. beta = -0.41, 95% CI [-1.72, 0.90])
##   - The effect of STIMULUS CATEGORY [D] × BLOCK [B6] is statistically
## non-significant and negative (beta = -0.66, 95% CI [-1.92, 0.60], p = 0.305;
## Std. beta = -0.66, 95% CI [-1.92, 0.60])
## 
## Standardized parameters were obtained by fitting the model on a standardized
## version of the dataset. 95% Confidence Intervals (CIs) and p-values were
## computed using a Wald z-distribution approximation.
######### PRINT COEFFICIENTS 
# print("COEFFICIENT ESTIMATES — LOG ODDS")
# tidy(m_best)
## Exponentiated coefficients (odds ratios) for the final model.
print("COEFFICIENT ESTIMATES — ODDS RATIOS")
## [1] "COEFFICIENT ESTIMATES — ODDS RATIOS"
tidy(m_best, exponentiate=TRUE)
## # A tibble: 26 × 7
##    effect group term               estimate std.error statistic  p.value
##    <chr>  <chr> <chr>                 <dbl>     <dbl>     <dbl>    <dbl>
##  1 fixed  <NA>  (Intercept)           0.680     0.220    -1.19  2.33e- 1
##  2 fixed  <NA>  CHART_LIKE_Z          3.05      0.282    12.1   1.36e-33
##  3 fixed  <NA>  STIMULUS_CATEGORYB    3.01      1.35      2.46  1.41e- 2
##  4 fixed  <NA>  STIMULUS_CATEGORYC    3.49      1.58      2.76  5.86e- 3
##  5 fixed  <NA>  STIMULUS_CATEGORYD    3.76      1.69      2.95  3.15e- 3
##  6 fixed  <NA>  BLOCKB2               1.51      0.693     0.897 3.70e- 1
##  7 fixed  <NA>  BLOCKB3               1.36      0.626     0.675 5.00e- 1
##  8 fixed  <NA>  BLOCKB4               0.898     0.411    -0.235 8.14e- 1
##  9 fixed  <NA>  BLOCKB5               1.23      0.568     0.440 6.60e- 1
## 10 fixed  <NA>  BLOCKB6               2.32      1.07      1.82  6.83e- 2
## # ℹ 16 more rows

3.0.2.4 model vis

############ VISUALIZE MODEL COEFFICIENTS 
#SJPLOT | MODEL | ODDS RATIO
#library(sjPlot)
## Forest plot of the final model's fixed effects as odds ratios
## (vline marks OR = 1, i.e. no effect).
plot_model(m_best, type = "est",
           vline.color = "red", 
           show.intercept = TRUE, 
           show.values = TRUE) + theme_minimal() + 
  labs(title = "Model Predicted Odds Ratio for ENCOUNTER",
       subtitle = "")

############ VISUALIZE MODEL PREDICTIONS
#SJPLOT | MODEL | PROBABILITIES

# Predicted probability of ENGAGE across (z-scored) CHART_LIKE, holding
# CATEGORY and BLOCK at their weighted averages.
plot_model(m_best, type="pred", 
           terms = c("CHART_LIKE_Z"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means on ENCOUNTER",
       # FIX: corrected subtitle typo "ENAGAGE" -> "ENGAGE"
       subtitle = "Probability of ENGAGE steadily increases as a function of CHART_LIKE",
       caption = "predicted effect of CHART LIKE holding CATEGORY and BLOCK at weighted average")
## Data were 'prettified'. Consider using `terms="CHART_LIKE_Z [all]"` to
##   get smooth plots.

# Predicted probability of ENGAGE by CHART_LIKE_Z, split by STIMULUS_CATEGORY
# (BLOCK held at weighted average).
plot_model(m_best, type="pred", 
           terms = c("CHART_LIKE_Z", "STIMULUS_CATEGORY"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means on ENCOUNTER",
       subtitle = "Increases as a function of CHART_LIKE, with CATEGORY A (least embellished) lower",
       caption = "predicted effect of CHART LIKE AT CATEGORY holding BLOCK at weighted average")
## Data were 'prettified'. Consider using `terms="CHART_LIKE_Z [all]"` to
##   get smooth plots.

# Same predictions split by BLOCK (CATEGORY held at weighted average).
plot_model(m_best, type="pred", 
           terms = c("CHART_LIKE_Z", "BLOCK"), ci.lvl = 0.95) + theme_minimal() + 
      labs(title = "Estimated Marginal Means on ENCOUNTER",
           subtitle = "Steady increases by CHART_LIKE, little diff by block",
          caption = "predicted effect of CHART LIKE AT BLOCK holding CATEGORY at weighted average")
## Data were 'prettified'. Consider using `terms="CHART_LIKE_Z [all]"` to
##   get smooth plots.

# Full conditional predictions: CHART_LIKE_Z x CATEGORY x BLOCK.
plot_model(m_best, type="pred", 
           terms = c("CHART_LIKE_Z","STIMULUS_CATEGORY","BLOCK"), ci.lvl = 0.95) + theme_minimal() + 
  labs(title = "Estimated Marginal Means on ENCOUNTER",
       subtitle = "Steady increase by CHART_LIKE, with CATEGORY differences differing by BLOCK",
       caption = "predicted effect conditioned on all predictors")
## Data were 'prettified'. Consider using `terms="CHART_LIKE_Z [all]"` to
##   get smooth plots.

## MANUAL PREDICTION INTERACTION PLOT [bc sjPlot cant facet argh]
# Estimate marginal mean probabilities on the response scale for every
# CHART_LIKE_Z x BLOCK x STIMULUS_CATEGORY combination (emmeans backend).
means <- estimate_means(m_best, at=c("CHART_LIKE_Z","BLOCK","STIMULUS_CATEGORY"), transform = "response",
                        backend="emmeans")
m <- as_tibble(means)

## CUSTOM PREDICTIONS PLOT
# Ribbons + linerange show the 95% CI around each predicted probability;
# facet_grid gives one panel per BLOCK x CATEGORY cell (which sjPlot cannot do).
m %>% ggplot( aes(x = CHART_LIKE_Z, y = Probability, color=STIMULUS_CATEGORY, fill=STIMULUS_CATEGORY)) +
  geom_ribbon(aes(x=CHART_LIKE_Z, ymin = CI_low, ymax=CI_high), alpha= 0.5) + 
  geom_linerange(aes(ymin = CI_low, ymax=CI_high)) +
  geom_point() +
  scale_y_continuous(limits = c(0,1))+
  facet_grid(BLOCK ~ STIMULUS_CATEGORY) +
  # facet_wrap(~BLOCK) +
  theme_minimal() + easy_remove_legend() + 
  labs(title = "MODEL PREDICTED Probability of ENGAGE (rather than scroll)",
      subtitle = "Steady increase by CHART_LIKE, with CATEGORY differences differing by BLOCK",
       # FIX: corrected caption typo "ENCONTER" and closed the unbalanced "(1|PID"
       caption = "ENCOUNTER ~ CHART_LIKE_Z + CATEGORY * BLOCK + (1|PID)")

3.0.3 CHART ACTION ~ CATEGORY

3.0.4 PICKUP HERE

# Build the analysis frame for CHART_ACTION ~ CATEGORY/BLOCK:
# - reverse factor order of CHART_ACTION and STIMULUS_CATEGORY for display
# - drop the common stimulus "B0-0" (seen by ALL participants, so it would
#   be over-represented relative to the block-specific stimuli)
df <- df_actions %>% 
  select(STIMULUS, STIMULUS_CATEGORY, BLOCK, CHART_ACTION, CHART_LIKE, PID) %>% 
  mutate(
CHART_ACTION = fct_rev(CHART_ACTION),
STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY),
  ) %>% filter(STIMULUS != "B0-0")


# m <- glm(df)


## CATEGORY
## GGSTATSPLOT
##############################
# Proportion of each CHART_ACTION within each STIMULUS_CATEGORY
# (statistical subtitle suppressed; custom "actions" palette applied).
ggbarstats( data = df, x = CHART_ACTION, y = STIMULUS_CATEGORY,
        results.subtitle = FALSE) + 
scale_fill_manual(values = my_palettes(name="actions", direction = "1")) +
theme_minimal() +
# labs( title = "",  x = "", y="") + 
theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################

## BLOCK
## GGSTATSPLOT
##############################
# Same stacked-bar breakdown, but by BLOCK rather than CATEGORY.
ggbarstats( data = df, x = CHART_ACTION, y = BLOCK,
        results.subtitle = FALSE) + 
scale_fill_manual(values = my_palettes(name="actions", direction = "1")) +
theme_minimal() +
# labs( title = "",  x = "", y="") + 
theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################


## CATEGORY / BLOCK
# GGSTATSPLOT
##############################
# CHART_ACTION by CATEGORY, one sub-plot per BLOCK (6 blocks -> 6 panels).
grouped_ggbarstats( data = df, x = CHART_ACTION, y = STIMULUS_CATEGORY,   grouping.var=BLOCK,
                results.subtitle = FALSE,
                ggplot.component = scale_fill_manual(values = my_palettes(name="actions", direction = "1"))) + 
theme_minimal() +
# labs( title = "",  x = "", y="") + 
theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################


# BLOCK / CATEGORY
# GGSTATSPLOT
##############################
# Transposed view: CHART_ACTION by BLOCK, one sub-plot per CATEGORY (4 panels).
grouped_ggbarstats( data = df, x = CHART_ACTION, y = BLOCK,   grouping.var=STIMULUS_CATEGORY,
                results.subtitle = FALSE,
                ggplot.component = scale_fill_manual(values = my_palettes(name="actions", direction = "1"))) + 
theme_minimal() +
# labs( title = "",  x = "", y="") + 
theme(aspect.ratio = 1)
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.
## Scale for fill is already present.
## Adding another scale for fill, which will replace the existing scale.

##############################

# STIMULUS
# GGSTATSPLOT
# TODO STACKED BAR BY ACTION

3.0.5 WIP DATA AND DESIGN BY CATEGORY and BLOCK

3.0.5.1 visualization

# Build the MAKER_DATA frame for visualization:
# - r_MAKER_DATA reverses the raw scale (see inline comments)
# - per CATEGORY x BLOCK cell, precompute mean (m) and median (md) for labels
df <- df_graphs %>% 
  mutate(
    ## reverse order of MAKER_DATA, because scale ranged from 0=expert to 100=layperson
    ## we want the reverse 
    ## chose NOT to z-score data, bc we want the data in terms of the original scale 
    r_MAKER_DATA = reverse_scale(MAKER_DATA),
    STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)
  ) %>% filter(STIMULUS!="B0-0") %>% 
  group_by(STIMULUS_CATEGORY, BLOCK) %>% 
  mutate(
    m=mean(MAKER_DATA), 
    md=median(MAKER_DATA)
  )

# Ridgeline density of raw MAKER_DATA per BLOCK, faceted by CATEGORY;
# blue point + label mark the cell mean (median layers kept but disabled).
# NOTE(review): plots the raw MAKER_DATA, not the reversed r_MAKER_DATA.
df %>% ggplot(aes(x=MAKER_DATA,  y=BLOCK))+
  geom_density_ridges( scale = 0.75) + 
  # ##MEDIAN
  # stat_summary(fun=median, geom="text", colour="red",  fontface = "bold", size = 2.5,
  #              vjust=+2, hjust = 0, aes( label=round(md, digits=0)))+
  # stat_summary(fun=median, geom="point", shape=20, size=3, color="red", fill="red") +
  ## MEAN
  stat_summary(fun=mean, geom="text", colour="blue",  fontface = "bold", size = 2.5,
               vjust=+2, hjust = 0, aes( label=round(m, digits=0)))+
  stat_summary(fun="mean", geom="point", shape=20, size=3, color="blue", fill="blue") +
  facet_wrap(~STIMULUS_CATEGORY)+ 
  labs(title = "MAKER_DATA by BLOCK AND CATEGORY", caption="(mean in blue)")+
  theme_minimal() + easy_remove_legend()
## Picking joint bandwidth of 8.98
## Picking joint bandwidth of 9.15
## Picking joint bandwidth of 9.02
## Picking joint bandwidth of 9.8

3.0.5.2 models

3.0.5.2.1 MAKER_DATA BY CATEGORY
### LINEAR MIXED EFFECTS MODEL ##################

# Rebuild the MAKER_DATA frame (without the grouped summary columns used
# for plotting above) for model fitting.
df <- df_graphs %>% 
  mutate(
    ## reverse order of MAKER_DATA, because scale ranged from 0=expert to 100=layperson
    ## we want the reverse 
    ## chose NOT to z-score data, bc we want the data in terms of the original scale 
    r_MAKER_DATA = reverse_scale(MAKER_DATA),
    STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)
  ) %>% filter(STIMULUS!="B0-0")

## SET CONTRASTS
# contrasts(df$MAKER_ID) <-car::contr.Treatment(levels(df$MAKER_ID)) # intercept first group mean; coeff dif from first

## DEFINE MODEL
# mr1/mr2: random-effects-only baselines (participant; participant + stimulus)
# mm1-mm4: fixed effects of STIMULUS, CATEGORY, BLOCK, and CATEGORY x BLOCK,
#          each with a random intercept per participant (PID)
mr1 <-lmer(r_MAKER_DATA ~  (1|PID) , data=df)
mr2 <-lmer(r_MAKER_DATA ~  (1|PID) + (1|STIMULUS), data=df)
mm1 <-lmer(r_MAKER_DATA ~  STIMULUS +  (1|PID) , data=df)
mm2 <-lmer(r_MAKER_DATA ~  STIMULUS_CATEGORY +  (1|PID) , data=df)
mm3 <-lmer(r_MAKER_DATA ~  BLOCK +  (1|PID) , data=df)
mm4 <-lmer(r_MAKER_DATA ~  STIMULUS_CATEGORY*BLOCK +  (1|PID) , data=df)



## sig diff between categories?
print("PREDICTED BY CATEGORY?")
## [1] "PREDICTED BY CATEGORY?"
print("we do expect to see some difference between categories, likely between A and D, however, variance within each category should be substantial")
## [1] "we do expect to see some difference between categories, likely between A and D, however, variance within each category should be substantial"
f <- "MAKER_DATA ~ STIMULUS_CATEGORY"
anova(mm2)
## Type III Analysis of Variance Table with Satterthwaite's method
##                   Sum Sq Mean Sq NumDF DenDF F value                Pr(>F)    
## STIMULUS_CATEGORY  57625   19208     3   951  30.447 < 0.00000000000000022 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Estimated marginal means + pairwise contrasts for CATEGORY, plotted with
# mean labels and marginal R2 in the subtitle.
r2 = r2(mm2)
means <- estimate_means(mm2, at="STIMULUS_CATEGORY")
contrasts <- estimate_contrasts(mm2, contrast="STIMULUS_CATEGORY",method="pairwise")
plot(contrasts, means) + 
    geom_text(aes(x=means$STIMULUS_CATEGORY, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DATA COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

print("PREDICTED BY BLOCK")
## [1] "PREDICTED BY BLOCK"
print("we do not expect to see sig diffs btwn blocks if they are aesthetically balanced")
## [1] "we do not expect to see sig diffs btwn blocks if they are aesthetically balanced"
# FIX: the caption previously showed the interaction formula, but the model
# tested and plotted here is mm3 (BLOCK-only); label with the formula actually
# used (matches the parallel MAKER_DESIGN section).
f <- "MAKER_DATA ~ BLOCK + (1|PID)"
anova(mm3)
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF DenDF F value      Pr(>F)    
## BLOCK  25396  5079.2     5   312  7.3686 0.000001511 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Estimated marginal means + pairwise contrasts for BLOCK, with mean labels
# and marginal R2 in the subtitle.
r2 = r2(mm3)
means <- estimate_means(mm3, at="BLOCK")
contrasts <- estimate_contrasts(mm3, contrast="BLOCK",method="pairwise")
plot(contrasts, means) + 
    geom_text(aes(x=means$BLOCK, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DATA COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

print("PREDICTED BY INTERACTION")
## [1] "PREDICTED BY INTERACTION"
print("")
## [1] ""
# FIX: the caption previously showed the CATEGORY-only formula, but the model
# tested and plotted here is mm4 (CATEGORY x BLOCK interaction); label with the
# formula actually used (matches the parallel MAKER_DESIGN section).
f <- "MAKER_DATA ~ STIMULUS_CATEGORY * BLOCK + (1|PID)"
anova(mm4)
## Type III Analysis of Variance Table with Satterthwaite's method
##                         Sum Sq Mean Sq NumDF DenDF F value
## STIMULUS_CATEGORY        57577 19192.4     3   936 38.7034
## BLOCK                    18270  3654.0     5   312  7.3686
## STIMULUS_CATEGORY:BLOCK 135818  9054.6    15   936 18.2594
##                                        Pr(>F)    
## STIMULUS_CATEGORY       < 0.00000000000000022 ***
## BLOCK                             0.000001511 ***
## STIMULUS_CATEGORY:BLOCK < 0.00000000000000022 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Cell means + pairwise contrasts for every CATEGORY x BLOCK combination,
# faceted by BLOCK; marginal R2 reported in the subtitle.
r2 = r2(mm4)
means <- estimate_means(mm4, at=c("STIMULUS_CATEGORY","BLOCK"))
contrasts <- estimate_contrasts(mm4, c("STIMULUS_CATEGORY","BLOCK"),method="pairwise")
plot(contrasts, means) + facet_wrap("BLOCK")+
    # geom_text(aes(x=means$BLOCK, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DATA COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

## TEST MODEL FIT 
# Compare candidate models: CATEGORY-only (mm2) vs BLOCK-only (mm3) vs
# interaction (mm4), then rank all six on standard performance indices.
# test_performance(mm2,mm3)    
# test_performance(mm2,mm4)    
# test_performance(mm3,mm4)    
# NOTE(review): mm2 and mm3 are non-nested, so this LRT (and its p-value)
# should be read with caution; AIC/BIC below are the safer comparison.
anova(mm2,mm3)
## refitting model(s) with ML (instead of REML)
## Data: df
## Models:
## mm2: r_MAKER_DATA ~ STIMULUS_CATEGORY + (1 | PID)
## mm3: r_MAKER_DATA ~ BLOCK + (1 | PID)
##     npar   AIC   BIC  logLik deviance Chisq Df Pr(>Chisq)
## mm2    6 11942 11973 -5965.1    11930                    
## mm3    8 11998 12040 -5991.1    11982     0  2          1
print("the model with CATEGORY is not a significantly better fit than the model with BLOCK")
## [1] "the model with CATEGORY is not a significantly better fit than the model with BLOCK"
test_likelihoodratio(mm2, mm4)
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name |           Model | df | df_diff |   Chi2 |      p
## -------------------------------------------------------
## mm2  | lmerModLmerTest |  6 |         |        |       
## mm4  | lmerModLmerTest | 26 |      20 | 280.36 | < .001
print("interaction better fit than category")
## [1] "interaction better fit than category"
test_likelihoodratio(mm3, mm4)
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name |           Model | df | df_diff |   Chi2 |      p
## -------------------------------------------------------
## mm3  | lmerModLmerTest |  8 |         |        |       
## mm4  | lmerModLmerTest | 26 |      18 | 332.40 | < .001
print("interaction better fit than block")
## [1] "interaction better fit than block"
# Side-by-side coefficient table (95% CIs) for the three fixed-effect models.
compare_models(mm2,mm3,mm4)
## Parameter                          |                     mm2 |                   mm3 |                     mm4
## --------------------------------------------------------------------------------------------------------------
## (Intercept)                        |  70.55 ( 67.62,  73.47) | 56.97 ( 53.13, 60.82) |  64.95 ( 58.56,  71.34)
## STIMULUS CATEGORY (B)              |  -7.71 (-11.62,  -3.81) |                       |  -1.13 ( -9.46,   7.20)
## STIMULUS CATEGORY (C)              | -16.10 (-20.01, -12.19) |                       | -17.56 (-25.89,  -9.23)
## STIMULUS CATEGORY (D)              | -16.23 (-20.13, -12.32) |                       | -13.20 (-21.53,  -4.87)
## BLOCK (B2)                         |                         |  5.47 ( -0.05, 10.99) |   7.75 ( -1.42,  16.91)
## BLOCK (B3)                         |                         |  6.62 (  1.10, 12.14) |  10.59 (  1.43,  19.76)
## BLOCK (B4)                         |                         | 10.79 (  5.32, 16.26) |  14.11 (  5.03,  23.19)
## BLOCK (B5)                         |                         | -4.67 (-10.16,  0.82) |   4.66 ( -4.46,  13.78)
## BLOCK (B6)                         |                         |  3.26 ( -2.26,  8.78) |  -3.48 (-12.65,   5.68)
## STIMULUS CATEGORY (B) × BLOCK (B3) |                         |                       | -12.80 (-24.75,  -0.85)
## STIMULUS CATEGORY (B) × BLOCK (B2) |                         |                       | -17.22 (-29.17,  -5.27)
## STIMULUS CATEGORY (C) × BLOCK (B2) |                         |                       |  15.35 (  3.40,  27.30)
## STIMULUS CATEGORY (D) × BLOCK (B2) |                         |                       |  -7.24 (-19.19,   4.71)
## STIMULUS CATEGORY (C) × BLOCK (B4) |                         |                       |  15.93 (  4.10,  27.77)
## STIMULUS CATEGORY (C) × BLOCK (B3) |                         |                       | -10.40 (-22.35,   1.55)
## STIMULUS CATEGORY (D) × BLOCK (B3) |                         |                       |   7.32 ( -4.64,  19.27)
## STIMULUS CATEGORY (B) × BLOCK (B4) |                         |                       | -28.08 (-39.91, -16.24)
## STIMULUS CATEGORY (D) × BLOCK (B5) |                         |                       | -16.29 (-28.18,  -4.40)
## STIMULUS CATEGORY (D) × BLOCK (B4) |                         |                       |  -1.13 (-12.97,  10.70)
## STIMULUS CATEGORY (B) × BLOCK (B5) |                         |                       |   7.01 ( -4.88,  18.91)
## STIMULUS CATEGORY (C) × BLOCK (B5) |                         |                       | -28.04 (-39.93, -16.15)
## STIMULUS CATEGORY (B) × BLOCK (B6) |                         |                       |  11.74 ( -0.21,  23.69)
## STIMULUS CATEGORY (C) × BLOCK (B6) |                         |                       |  16.03 (  4.07,  27.98)
## STIMULUS CATEGORY (D) × BLOCK (B6) |                         |                       |  -0.80 (-12.75,  11.15)
## --------------------------------------------------------------------------------------------------------------
## Observations                       |                    1272 |                  1272 |                    1272
# Rank all candidate models by overall performance (R2, ICC, RMSE, AIC/BIC
# weights); mm4 (interaction) ties mm1 (per-stimulus) at the top.
compare_performance(mr1, mr2, mm1,mm2,mm3,mm4, rank=TRUE)
## # Comparison of Model Performance Indices
## 
## Name |           Model | R2 (cond.) | R2 (marg.) |   ICC |   RMSE |  Sigma | AIC weights | AICc weights | BIC weights | Performance-Score
## -----------------------------------------------------------------------------------------------------------------------------------------
## mm1  | lmerModLmerTest |      0.348 |      0.232 | 0.150 | 20.885 | 22.268 |       0.500 |        0.500 |    5.59e-15 |            79.12%
## mm4  | lmerModLmerTest |      0.348 |      0.232 | 0.150 | 20.885 | 22.268 |       0.500 |        0.500 |    5.59e-15 |            79.12%
## mr2  | lmerModLmerTest |      0.346 |      0.000 | 0.346 | 20.898 | 22.270 |    2.27e-11 |     3.93e-11 |       1.000 |            62.36%
## mm2  | lmerModLmerTest |      0.160 |      0.060 | 0.106 | 24.046 | 25.117 |    3.20e-53 |     5.45e-53 |    8.19e-45 |            16.89%
## mm3  | lmerModLmerTest |      0.085 |      0.033 | 0.054 | 25.587 | 26.255 |    2.17e-65 |     3.60e-65 |    3.22e-59 |             1.94%
## mr1  | lmerModLmerTest |      0.081 |      0.000 | 0.081 | 25.380 | 26.255 |    6.45e-71 |     1.12e-70 |    3.72e-59 |             1.70%
f <- "MAKER_DATA ~ STIMULUS_CATEGORY * BLOCK + (1|PID)"


## PLOT BEST FIT MODEL PREDICTIONS
# Predicted MAKER_DATA from the best-fitting interaction model (mm4),
# per BLOCK within each CATEGORY facet, with 95% confidence intervals.
(p_data <- cat_plot(mm4, pred = BLOCK, modx = STIMULUS_CATEGORY,
         geom = "line", interval.geom= "linerange", 
         interval=TRUE, int.type = "confidence", int.width = 0.95, robust = TRUE,
         plot.points = FALSE) + 
    facet_wrap(~STIMULUS_CATEGORY) + 
    labs(title = "LMER Predictions | MAKER_DATA by BLOCK X CATEGORY", 
         caption = f, 
         # FIX: corrected y-label typo "layerpson" -> "layperson"
         y="MAKER_DATA \n 0(layperson) --> 100 (professional)") + easy_remove_legend()
)

# if(graph_save){
#   ggsave(plot = p_data, path="figs/level_category/models", filename =paste0("lmer_maker_DATA_by_stimulus_category","_ixn.png"), units = c("in"))
# }


## PLOT MODEL PARAMETERS
# Forest plot of mm4 fixed-effect estimates with values and p-values shown.
plot_model(mm4, type = "est",
        # show.intercept = TRUE,
        show.values = TRUE,
        value.offset = .25,
        show.p = TRUE
) + theme_minimal() + labs(caption=f)

INTERPRETATION Here we see that a linear mixed effects model, predicting MAKER_DATA by the interaction of STIMULUS_CATEGORY and BLOCK indicates that ratings of maker data competencies do NOT vary consistently as a function of CATEGORY (i.e. the degree of ‘embellishment’). Although the degree of embellishment within a block (A,B,C,D) is the same, the ratings of maker data competency vary. This pattern is particularly salient in categories C and D (with more embellishment). These data suggest that social inferences about a maker’s data competency are not made solely based on the amount of embellishment, but rather, in response to the particular features of the visualization. A highly embellished chart might be rated with relatively high data competency (e.g. B3-D) or lower data competency (e.g. B5-D).

3.0.5.2.2 MAKER_DESIGN BY CATEGORY
# Build the MAKER_DESIGN analysis frame, mirroring the MAKER_DATA workflow:
# reverse the raw scale, reverse CATEGORY order, drop the common stimulus.
df <- df_graphs %>% 
  mutate(
    ## reverse order of MAKER_DATA, because scale ranged from 0=expert to 100=layperson
    ## we want the reverse 
    ## chose NOT to z-score data, bc we want the data in terms of the original scale 
    r_MAKER_DESIGN = reverse_scale(MAKER_DESIGN),
    STIMULUS_CATEGORY = fct_rev(STIMULUS_CATEGORY)
  ) %>% filter(STIMULUS!="B0-0")


## DEFINE MODEL
# Same candidate set as for MAKER_DATA: random-effects baselines (mr1, mr2)
# and fixed effects of STIMULUS / CATEGORY / BLOCK / CATEGORY x BLOCK (mm1-mm4),
# each with a per-participant random intercept.
mr1 <-lmer(r_MAKER_DESIGN ~  (1|PID) , data=df)
mr2 <-lmer(r_MAKER_DESIGN ~  (1|PID) + (1|STIMULUS), data=df)
mm1 <-lmer(r_MAKER_DESIGN ~  STIMULUS +  (1|PID) , data=df)
mm2 <-lmer(r_MAKER_DESIGN ~  STIMULUS_CATEGORY +  (1|PID) , data=df)
mm3 <-lmer(r_MAKER_DESIGN ~  BLOCK +  (1|PID) , data=df)
mm4 <-lmer(r_MAKER_DESIGN ~  STIMULUS_CATEGORY*BLOCK +  (1|PID) , data=df)



## sig diff between categories?
print("PREDICTED BY CATEGORY?")
## [1] "PREDICTED BY CATEGORY?"
print("we do expect to see some difference between categories, likely between A and D, however, variance within each category should be substantial")
## [1] "we do expect to see some difference between categories, likely between A and D, however, variance within each category should be substantial"
f <- "MAKER_DESIGN ~ STIMULUS_CATEGORY"
anova(mm2)
## Type III Analysis of Variance Table with Satterthwaite's method
##                   Sum Sq Mean Sq NumDF DenDF F value                Pr(>F)    
## STIMULUS_CATEGORY  87257   29086     3   951  43.882 < 0.00000000000000022 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Estimated marginal means + pairwise contrasts for CATEGORY, with mean
# labels and marginal R2 in the subtitle.
r2 = r2(mm2)
means <- estimate_means(mm2, at="STIMULUS_CATEGORY")
contrasts <- estimate_contrasts(mm2, contrast="STIMULUS_CATEGORY",method="pairwise")
plot(contrasts, means) + 
    geom_text(aes(x=means$STIMULUS_CATEGORY, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DESIGN COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

print("PREDICTED BY BLOCK")
## [1] "PREDICTED BY BLOCK"
print("we do not expect to see sig diffs btwn blocks if they are aesthetically balanced")
## [1] "we do not expect to see sig diffs btwn blocks if they are aesthetically balanced"
f <- "MAKER_DESIGN ~ BLOCK"
anova(mm3)
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF DenDF F value     Pr(>F)    
## BLOCK  21562  4312.5     5   312  5.7332 0.00004446 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Estimated marginal means + pairwise contrasts for BLOCK.
r2 = r2(mm3)
means <- estimate_means(mm3, at="BLOCK")
contrasts <- estimate_contrasts(mm3, contrast="BLOCK",method="pairwise")
plot(contrasts, means) + 
    geom_text(aes(x=means$BLOCK, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DESIGN COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

print("PREDICTED BY INTERACTION")
## [1] "PREDICTED BY INTERACTION"
print("")
## [1] ""
f <- "MAKER_DESIGN ~ STIMULUS_CATEGORY * BLOCK + (1|PID)"
anova(mm4)
## Type III Analysis of Variance Table with Satterthwaite's method
##                         Sum Sq Mean Sq NumDF DenDF F value
## STIMULUS_CATEGORY        88006 29335.2     3   936 54.9818
## BLOCK                    15294  3058.9     5   312  5.7332
## STIMULUS_CATEGORY:BLOCK 130941  8729.4    15   936 16.3612
##                                        Pr(>F)    
## STIMULUS_CATEGORY       < 0.00000000000000022 ***
## BLOCK                              0.00004446 ***
## STIMULUS_CATEGORY:BLOCK < 0.00000000000000022 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Cell means + pairwise contrasts for every BLOCK x CATEGORY combination,
# faceted by CATEGORY.
r2 = r2(mm4)
means <- estimate_means(mm4, at=c("BLOCK","STIMULUS_CATEGORY"))
contrasts <- estimate_contrasts(mm4, c("BLOCK","STIMULUS_CATEGORY"),method="pairwise")
plot(contrasts, means) + facet_wrap("STIMULUS_CATEGORY")+
    # geom_text(aes(x=means$BLOCK, y=means$Mean, label=round(means$Mean,2)), color="blue", position = position_nudge(x=0.25)) + 
  theme_minimal() + labs(caption =f, y="predicted MAKER DESIGN COMPETENCY \n (0=layperson, 100=expert)", 
                         subtitle=paste0("R2 marginal ",round(r2$R2_marginal*100,2),"%"))

## TEST MODEL FIT 
# Compare candidate MAKER_DESIGN models, mirroring the MAKER_DATA workflow.
# test_performance(mm2,mm3)    
# test_performance(mm2,mm4)    
# test_performance(mm3,mm4)    
# NOTE(review): mm2 and mm3 are non-nested, so this LRT (and its p-value)
# should be read with caution; AIC/BIC below are the safer comparison.
anova(mm2,mm3)
## refitting model(s) with ML (instead of REML)
## Data: df
## Models:
## mm2: r_MAKER_DESIGN ~ STIMULUS_CATEGORY + (1 | PID)
## mm3: r_MAKER_DESIGN ~ BLOCK + (1 | PID)
##     npar   AIC   BIC  logLik deviance Chisq Df Pr(>Chisq)
## mm2    6 12002 12032 -5994.8    11990                    
## mm3    8 12101 12142 -6042.6    12085     0  2          1
print("the model with CATEGORY is not a significantly better fit than the model with BLOCK")
## [1] "the model with CATEGORY is not a significantly better fit than the model with BLOCK"
test_likelihoodratio(mm2, mm4)
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name |           Model | df | df_diff |   Chi2 |      p
## -------------------------------------------------------
## mm2  | lmerModLmerTest |  6 |         |        |       
## mm4  | lmerModLmerTest | 26 |      20 | 250.10 | < .001
print("interaction better fit than category")
## [1] "interaction better fit than category"
test_likelihoodratio(mm3, mm4)
## # Likelihood-Ratio-Test (LRT) for Model Comparison (ML-estimator)
## 
## Name |           Model | df | df_diff |   Chi2 |      p
## -------------------------------------------------------
## mm3  | lmerModLmerTest |  8 |         |        |       
## mm4  | lmerModLmerTest | 26 |      18 | 345.87 | < .001
print("interaction better fit than block")
## [1] "interaction better fit than block"
# Side-by-side coefficient table (95% CIs) for the three fixed-effect models.
compare_models(mm2,mm3,mm4)
## Parameter                          |                  mm2 |                   mm3 |                     mm4
## -----------------------------------------------------------------------------------------------------------
## (Intercept)                        | 47.57 (44.58, 50.56) | 54.07 ( 50.10, 58.04) |  57.91 ( 51.29,  64.52)
## STIMULUS CATEGORY (B)              | -0.26 (-4.26,  3.75) |                       |  -6.20 (-14.84,   2.44)
## STIMULUS CATEGORY (C)              |  5.50 ( 1.49,  9.51) |                       |  -9.40 (-18.04,  -0.76)
## STIMULUS CATEGORY (D)              | 20.12 (16.12, 24.13) |                       |   0.25 ( -8.39,   8.90)
## BLOCK (B2)                         |                      |  4.42 ( -1.27, 10.11) |  -4.97 (-14.46,   4.52)
## BLOCK (B3)                         |                      | -0.43 ( -6.12,  5.26) | -18.37 (-27.86,  -8.88)
## BLOCK (B4)                         |                      |  3.21 ( -2.43,  8.84) |  -8.09 (-17.49,   1.30)
## BLOCK (B5)                         |                      | -9.50 (-15.17, -3.84) | -19.46 (-28.90, -10.01)
## BLOCK (B6)                         |                      |  1.36 ( -4.34,  7.05) | -11.68 (-21.17,  -2.19)
## STIMULUS CATEGORY (B) × BLOCK (B3) |                      |                       |  25.85 ( 13.46,  38.25)
## STIMULUS CATEGORY (B) × BLOCK (B2) |                      |                       | -15.76 (-28.16,  -3.37)
## STIMULUS CATEGORY (C) × BLOCK (B2) |                      |                       |  33.84 ( 21.45,  46.24)
## STIMULUS CATEGORY (D) × BLOCK (B2) |                      |                       |  19.46 (  7.06,  31.85)
## STIMULUS CATEGORY (C) × BLOCK (B4) |                      |                       |  16.21 (  3.94,  28.49)
## STIMULUS CATEGORY (C) × BLOCK (B3) |                      |                       |   6.25 ( -6.15,  18.64)
## STIMULUS CATEGORY (D) × BLOCK (B3) |                      |                       |  39.67 ( 27.27,  52.06)
## STIMULUS CATEGORY (B) × BLOCK (B4) |                      |                       |  -0.76 (-13.04,  11.51)
## STIMULUS CATEGORY (D) × BLOCK (B5) |                      |                       |  19.48 (  7.15,  31.82)
## STIMULUS CATEGORY (D) × BLOCK (B4) |                      |                       |  29.75 ( 17.47,  42.02)
## STIMULUS CATEGORY (B) × BLOCK (B5) |                      |                       |  18.43 (  6.09,  30.76)
## STIMULUS CATEGORY (C) × BLOCK (B5) |                      |                       |   1.91 (-10.43,  14.25)
## STIMULUS CATEGORY (B) × BLOCK (B6) |                      |                       |   8.26 ( -4.14,  20.65)
## STIMULUS CATEGORY (C) × BLOCK (B6) |                      |                       |  32.25 ( 19.85,  44.64)
## STIMULUS CATEGORY (D) × BLOCK (B6) |                      |                       |  11.63 ( -0.77,  24.03)
## -----------------------------------------------------------------------------------------------------------
## Observations                       |                 1272 |                  1272 |                    1272
# Rank all candidate models on performance indices; mm4 (interaction) ties
# mm1 (per-stimulus) at the top.
compare_performance(mr1, mr2, mm1,mm2,mm3,mm4, rank=TRUE)
## # Comparison of Model Performance Indices
## 
## Name |           Model | R2 (cond.) | R2 (marg.) |   ICC |   RMSE |  Sigma | AIC weights | AICc weights | BIC weights | Performance-Score
## -----------------------------------------------------------------------------------------------------------------------------------------
## mm4  | lmerModLmerTest |      0.347 |      0.235 | 0.147 | 21.684 | 23.099 |       0.500 |        0.500 |    7.26e-15 |            79.16%
## mm1  | lmerModLmerTest |      0.347 |      0.235 | 0.147 | 21.684 | 23.099 |       0.500 |        0.500 |    7.26e-15 |            79.16%
## mr2  | lmerModLmerTest |      0.347 |      0.000 | 0.347 | 21.698 | 23.100 |    1.75e-11 |     3.03e-11 |       1.000 |            62.44%
## mm2  | lmerModLmerTest |      0.179 |      0.085 | 0.103 | 24.672 | 25.745 |    1.20e-46 |     2.03e-46 |    3.97e-38 |            21.94%
## mm3  | lmerModLmerTest |      0.071 |      0.025 | 0.047 | 26.801 | 27.426 |    2.58e-68 |     4.28e-68 |    4.97e-62 |             1.51%
## mr1  | lmerModLmerTest |      0.067 |      0.000 | 0.067 | 26.644 | 27.426 |    3.33e-72 |     5.79e-72 |    2.49e-60 |             1.21%
# FIX: this is the MAKER_DESIGN section — caption previously said "MAKER_DATA".
f <- "MAKER_DESIGN ~ STIMULUS_CATEGORY * BLOCK + (1|PID)"


## PLOT BEST FIT MODEL PREDICTIONS
# Predicted MAKER_DESIGN from the best-fitting interaction model (mm4),
# per BLOCK within each CATEGORY facet, with 95% confidence intervals.
(p_design <- cat_plot(mm4, pred = BLOCK, modx = STIMULUS_CATEGORY,
         geom = "line", interval.geom= "linerange", 
         interval=TRUE, int.type = "confidence", int.width = 0.95, robust = TRUE,
         plot.points = FALSE) + 
    facet_wrap(~STIMULUS_CATEGORY) + 
    labs(title = "LMER Predictions | MAKER_DESIGN by BLOCK X CATEGORY", 
         caption = f, 
         # FIX: corrected y-label typo "layerpson" -> "layperson"
         y="MAKER_DESIGN \n 0(layperson) --> 100 (professional)") + easy_remove_legend()
)

# if(graph_save){
#   ggsave(plot = p_design, path="figs/level_category/models", filename =paste0("lmer_maker_DESIGN_by_stimulus_category","_ixn.png"), units = c("in"))
# }


## PLOT MODEL PARAMETERS
# Forest plot of mm4 fixed-effect estimates with values and p-values shown.
plot_model(mm4, type = "est",
        # show.intercept = TRUE,
        show.values = TRUE,
        value.offset = .25,
        show.p = TRUE
) + theme_minimal() + labs(caption=f)

INTERPRETATION Here we see that a linear mixed effects model, predicting MAKER_DESIGN by the combination of STIMULUS_CATEGORY and BLOCK indicates that ratings of maker design competencies do NOT vary consistently as a function of CATEGORY (i.e. the degree of ‘embellishment’). Although the degree of embellishment within a block (A,B,C,D) is the same, the ratings of maker design competency vary. This pattern is particularly salient in category C. These data suggest that social inferences about a maker’s design competency are not made solely based on the amount of embellishment, but rather, in response to the particular features of the visualization. A highly embellished chart might be rated with relatively high design competency (e.g. B2-C) or lower design competency (e.g. B5-C).